version (string, 24 classes) | code (string, 396–135k chars) | apis (sequence) | full_version (string, 1–6 chars) | repo_name (string, 6–64 chars) | hexsha (string, 40 chars)
---|---|---|---|---|---
1.6 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import itertools
import math
import torch
from torch.distributions import constraints
from torch.distributions.utils import lazy_property
from pyro.distributions.torch_distribution import TorchDistribution
class SpanningTree(TorchDistribution):
"""
Distribution over spanning trees on a fixed number ``V`` of vertices.
A tree is represented as :class:`torch.LongTensor` ``edges`` of shape
``(V-1,2)`` satisfying the following properties:
1. The edges constitute a tree, i.e. are connected and cycle free.
2. Each edge ``(v1,v2) = edges[e]`` is sorted, i.e. ``v1 < v2``.
3. The entire tensor is sorted in colexicographic order.
Use :func:`validate_edges` to verify `edges` are correctly formed.
The ``edge_logits`` tensor has one entry for each of the ``V*(V-1)//2``
edges in the complete graph on ``V`` vertices, where edges are each sorted
and the edge order is colexicographic::
(0,1), (0,2), (1,2), (0,3), (1,3), (2,3), (0,4), (1,4), (2,4), ...
This ordering corresponds to the size-independent pairing function::
k = v1 + v2 * (v2 - 1) // 2
where ``k`` is the rank of the edge ``(v1,v2)`` in the complete graph.
To convert a matrix of edge logits to the linear representation used here::
assert my_matrix.shape == (V, V)
i, j = make_complete_graph(V)
edge_logits = my_matrix[i, j]
:param torch.Tensor edge_logits: A tensor of length ``V*(V-1)//2``
containing logits (aka negative energies) of all edges in the complete
graph on ``V`` vertices. See above comment for edge ordering.
:param dict sampler_options: An optional dict of sampler options including:
``mcmc_steps`` defaulting to a single MCMC step (which is pretty good);
``initial_edges`` defaulting to a cheap approximate sample;
``backend`` one of "python" or "cpp", defaulting to "python".
"""
arg_constraints = {'edge_logits': constraints.real}
support = constraints.nonnegative_integer
has_enumerate_support = True
def __init__(self, edge_logits, sampler_options=None, validate_args=None):
if edge_logits.is_cuda:
raise NotImplementedError("SpanningTree does not support cuda tensors")
K = len(edge_logits)
V = int(round(0.5 + (0.25 + 2 * K)**0.5))
assert K == V * (V - 1) // 2
E = V - 1
event_shape = (E, 2)
batch_shape = ()
self.edge_logits = edge_logits
super().__init__(batch_shape, event_shape, validate_args=validate_args)
if self._validate_args:
if edge_logits.shape != (K,):
raise ValueError("Expected edge_logits of shape ({},), but got shape {}"
.format(K, edge_logits.shape))
self.num_vertices = V
self.sampler_options = {} if sampler_options is None else sampler_options
def validate_edges(self, edges):
"""
Validates a batch of ``edges`` tensors, as returned by :meth:`sample` or
:meth:`enumerate_support` or as input to :meth:`log_prob()`.
:param torch.LongTensor edges: A batch of edges.
:raises: ValueError
:returns: None
"""
if edges.shape[-2:] != self.event_shape:
raise ValueError("Invalid edges shape: {}".format(edges.shape))
# Verify canonical ordering.
if not ((0 <= edges) & (edges < self.num_vertices)).all():
raise ValueError("Invalid vertex ids:\n{}".format(edges))
if not (edges[..., 0] < edges[..., 1]).all():
raise ValueError("Vertices are not sorted in each edge:\n{}".format(edges))
if not ((edges[..., :-1, 1] < edges[..., 1:, 1]) |
((edges[..., :-1, 1] == edges[..., 1:, 1]) &
(edges[..., :-1, 0] < edges[..., 1:, 0]))).all():
raise ValueError("Edges are not sorted colexicographically:\n{}".format(edges))
# Verify tree property, i.e. connectivity.
V = self.num_vertices
for i in itertools.product(*map(range, edges.shape[:-2])):
edges_i = edges[i]
connected = torch.eye(V, dtype=torch.float)
connected[edges_i[:, 0], edges_i[:, 1]] = 1
connected[edges_i[:, 1], edges_i[:, 0]] = 1
for i in range(int(math.ceil(V ** 0.5))):
connected = connected.mm(connected).clamp_(max=1)
if not connected.min() > 0:
raise ValueError("Edges do not constitute a tree:\n{}".format(edges_i))
@lazy_property
def log_partition_function(self):
# By Kirchhoff's matrix-tree theorem, the partition function is the
# determinant of a truncated version of the graph Laplacian matrix. We
# use a Cholesky decomposition to compute the log determinant.
# See https://en.wikipedia.org/wiki/Kirchhoff%27s_theorem
V = self.num_vertices
v1, v2 = make_complete_graph(V).unbind(0)
logits = self.edge_logits.new_full((V, V), -math.inf)
logits[v1, v2] = self.edge_logits
logits[v2, v1] = self.edge_logits
log_diag = logits.logsumexp(-1)
# Numerically stabilize so that laplacian has 1's on the diagonal.
shift = 0.5 * log_diag
laplacian = torch.eye(V) - (logits - shift - shift[:, None]).exp()
truncated = laplacian[:-1, :-1]
try:
import gpytorch
log_det = gpytorch.lazy.NonLazyTensor(truncated).logdet()
except ImportError:
log_det = torch.cholesky(truncated).diag().log().sum() * 2
return log_det + log_diag[:-1].sum()
def log_prob(self, edges):
if self._validate_args:
self.validate_edges(edges)
v1 = edges[..., 0]
v2 = edges[..., 1]
k = v1 + v2 * (v2 - 1) // 2
return self.edge_logits[k].sum(-1) - self.log_partition_function
def sample(self, sample_shape=torch.Size()):
"""
This sampler is implemented using MCMC run for a small number of steps
after being initialized by a cheap approximate sampler. This sampler is
approximate and cubic time. This is faster than the classic
Aldous-Broder sampler [1,2], especially for graphs with large mixing
time. Recent research [3,4] proposes samplers that run in
sub-matrix-multiply time but are more complex to implement.
**References**
[1] `Generating random spanning trees`
Andrei Broder (1989)
[2] `The Random Walk Construction of Uniform Spanning Trees and Uniform Labelled Trees`,
David J. Aldous (1990)
[3] `Sampling Random Spanning Trees Faster than Matrix Multiplication`,
David Durfee, Rasmus Kyng, John Peebles, Anup B. Rao, Sushant Sachdeva
(2017) https://arxiv.org/abs/1611.07451
[4] `An almost-linear time algorithm for uniform random spanning tree generation`,
Aaron Schild (2017) https://arxiv.org/abs/1711.06455
"""
if sample_shape:
raise NotImplementedError("SpanningTree does not support batching")
edges = sample_tree(self.edge_logits, **self.sampler_options)
assert edges.dim() >= 2 and edges.shape[-2:] == self.event_shape
return edges
def enumerate_support(self, expand=True):
"""
This is implemented for trees with up to 6 vertices (and 5 edges).
"""
trees = enumerate_spanning_trees(self.num_vertices)
return torch.tensor(trees, dtype=torch.long)
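# Illustrative usage sketch (ours, not part of the original module): build a
# SpanningTree distribution on V=4 vertices with uniform edge logits and check
# the partition function against Cayley's formula V**(V-2) = 16. The helper
# name `_example_spanning_tree` is an assumption for demonstration only; it
# relies on `sample_tree` and friends defined further down in this module.
def _example_spanning_tree():
    V = 4
    edge_logits = torch.zeros(V * (V - 1) // 2)  # uniform weights over all 6 edges
    d = SpanningTree(edge_logits)
    # log Z should equal log(16) for the uniform distribution on 4 vertices.
    assert abs(d.log_partition_function.item() - math.log(16)) < 1e-4
    edges = d.sample()                # (V-1, 2) LongTensor in canonical order
    d.validate_edges(edges)           # raises ValueError if malformed
    logp = d.log_prob(edges)          # == -log(16) under uniform logits
    support = d.enumerate_support()   # shape (16, 3, 2) for V=4
    return edges, logp, support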
################################################################################
# Sampler implementation.
################################################################################
_cpp_module = None
def _get_cpp_module():
"""
JIT compiles the cpp_spanning_tree module.
"""
global _cpp_module
if _cpp_module is None:
import os
from torch.utils.cpp_extension import load
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "spanning_tree.cpp")
_cpp_module = load(name="cpp_spanning_tree",
sources=[path],
extra_cflags=['-O2'],
verbose=True)
return _cpp_module
def make_complete_graph(num_vertices, backend="python"):
"""
Constructs a complete graph.
The pairing function is: ``k = v1 + v2 * (v2 - 1) // 2``
:param int num_vertices: Number of vertices.
:returns: a 2 x K grid of (vertex, vertex) pairs.
"""
if backend == "python":
return _make_complete_graph(num_vertices)
elif backend == "cpp":
return _get_cpp_module().make_complete_graph(num_vertices)
else:
raise ValueError("unknown backend: {}".format(repr(backend)))
def _make_complete_graph(num_vertices):
if num_vertices < 2:
raise ValueError('PyTorch cannot handle zero-sized multidimensional tensors')
V = num_vertices
K = V * (V - 1) // 2
v1 = torch.arange(V)
v2 = torch.arange(V).unsqueeze(-1)
v1, v2 = torch.broadcast_tensors(v1, v2)
v1 = v1.contiguous().view(-1)
v2 = v2.contiguous().view(-1)
mask = (v1 < v2)
grid = torch.stack((v1[mask], v2[mask]))
assert grid.shape == (2, K)
return grid
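# Illustrative check (ours, not in the original module): the columns of
# make_complete_graph(V) follow the colexicographic order documented above,
# and the pairing function k = v1 + v2*(v2-1)//2 recovers each column index.
def _example_edge_ordering(V=4):
    grid = make_complete_graph(V)   # shape (2, V*(V-1)//2)
    v1, v2 = grid                   # [[0,0,1,0,1,2],[1,2,2,3,3,3]] for V=4
    k = v1 + v2 * (v2 - 1) // 2
    assert (k == torch.arange(grid.size(1))).all()
    return grid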
def _remove_edge(grid, edge_ids, neighbors, components, e):
"""
Remove an edge from a spanning tree.
"""
k = edge_ids[e]
v1 = grid[0, k].item()
v2 = grid[1, k].item()
neighbors[v1].remove(v2)
neighbors[v2].remove(v1)
components[v1] = 1
pending = [v1]
while pending:
v1 = pending.pop()
for v2 in neighbors[v1]:
if not components[v2]:
components[v2] = 1
pending.append(v2)
return k
def _add_edge(grid, edge_ids, neighbors, components, e, k):
"""
Add an edge connecting two components to create a spanning tree.
"""
edge_ids[e] = k
v1 = grid[0, k].item()
v2 = grid[1, k].item()
neighbors[v1].add(v2)
neighbors[v2].add(v1)
components.fill_(0)
def _find_valid_edges(components, valid_edge_ids):
"""
Find all edges between two components in a complete undirected graph.
:param components: A [V]-shaped array of boolean component ids. This
assumes there are exactly two nonempty components.
:param valid_edge_ids: An uninitialized array where output is written. On
return, the subarray valid_edge_ids[:end] will contain edge ids k for all
valid edges.
:returns: The number of valid edges found.
"""
k = 0
end = 0
for v2, c2 in enumerate(components):
for v1 in range(v2):
if c2 ^ components[v1]:
valid_edge_ids[end] = k
end += 1
k += 1
return end
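# Small illustrative example (ours): with vertices {0,1} in one component and
# {2,3} in the other, the bridging edges of the complete graph on 4 vertices
# are (0,2), (1,2), (0,3), (1,3), i.e. edge ids k = 1, 2, 3, 4.
def _example_find_valid_edges():
    components = torch.tensor([True, True, False, False])
    buf = torch.empty(4 * 3 // 2, dtype=torch.long)
    end = _find_valid_edges(components, buf)
    assert buf[:end].tolist() == [1, 2, 3, 4]
    return buf[:end]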
@torch.no_grad()
def _sample_tree_mcmc(edge_logits, edges):
if len(edges) <= 1:
return edges
E = len(edges)
V = E + 1
K = V * (V - 1) // 2
grid = make_complete_graph(V)
# Each of E edges in the tree is stored as an id k in [0, K) indexing into
# the complete graph. The id of an edge (v1,v2) is k = v1+v2*(v2-1)/2.
edge_ids = torch.empty(E, dtype=torch.long)
# This maps each vertex to the set of its neighboring vertices.
neighbors = {v: set() for v in range(V)}
# This maps each vertex to its connected component id (0 or 1).
components = torch.zeros(V, dtype=torch.bool)
for e in range(E):
v1, v2 = map(int, edges[e])
assert v1 < v2
edge_ids[e] = v1 + v2 * (v2 - 1) // 2
neighbors[v1].add(v2)
neighbors[v2].add(v1)
# This stores ids of edges that are valid candidates for Gibbs moves.
valid_edges_buffer = torch.empty(K, dtype=torch.long)
# Cycle through all edges in a random order.
for e in torch.randperm(E):
e = int(e)
# Perform a single-site Gibbs update by moving this edge elsewhere.
k = _remove_edge(grid, edge_ids, neighbors, components, e)
num_valid_edges = _find_valid_edges(components, valid_edges_buffer)
valid_edge_ids = valid_edges_buffer[:num_valid_edges]
valid_logits = edge_logits[valid_edge_ids]
valid_probs = (valid_logits - valid_logits.max()).exp()
total_prob = valid_probs.sum()
if total_prob > 0:
sample = torch.multinomial(valid_probs, 1)[0]
k = valid_edge_ids[sample]
_add_edge(grid, edge_ids, neighbors, components, e, k)
# Convert edge ids to a canonical list of pairs.
edge_ids = edge_ids.sort()[0]
edges = torch.empty((E, 2), dtype=torch.long)
edges[:, 0] = grid[0, edge_ids]
edges[:, 1] = grid[1, edge_ids]
return edges
def sample_tree_mcmc(edge_logits, edges, backend="python"):
"""
Sample a random spanning tree of a dense weighted graph using MCMC.
This uses Gibbs sampling on edges. Consider E undirected edges that can
move around a graph of ``V=1+E`` vertices. The edges are constrained so
that no two edges can span the same pair of vertices and so that the edges
must form a spanning tree. To Gibbs sample, chose one of the E edges at
random and move it anywhere else in the graph. After we remove the edge,
notice that the graph is split into two connected components. The
constraints imply that the edge must be replaced so as to connect the two
components. Hence to Gibbs sample, we collect all such bridging
(vertex,vertex) pairs and sample from them in proportion to
``exp(edge_logits)``.
:param torch.Tensor edge_logits: A length-K array of nonnormalized log
probabilities.
:param torch.Tensor edges: An E x 2 tensor of initial edges in the form
of (vertex,vertex) pairs. Each edge should be sorted and the entire
tensor should be lexicographically sorted.
:returns: An E x 2 tensor of edges in the form of (vertex,vertex) pairs.
Each edge should be sorted and the entire tensor should be
lexicographically sorted.
:rtype: torch.Tensor
"""
if backend == "python":
return _sample_tree_mcmc(edge_logits, edges)
elif backend == "cpp":
return _get_cpp_module().sample_tree_mcmc(edge_logits, edges)
else:
raise ValueError("unknown backend: {}".format(repr(backend)))
@torch.no_grad()
def _sample_tree_approx(edge_logits):
K = len(edge_logits)
V = int(round(0.5 + (0.25 + 2 * K)**0.5))
assert K == V * (V - 1) // 2
E = V - 1
grid = make_complete_graph(V)
# Each of E edges in the tree is stored as an id k in [0, K) indexing into
# the complete graph. The id of an edge (v1,v2) is k = v1+v2*(v2-1)/2.
edge_ids = torch.empty((E,), dtype=torch.long)
# This maps each vertex to whether it is a member of the cumulative tree.
components = torch.zeros(V, dtype=torch.bool)
# Sample the first edge at random.
probs = (edge_logits - edge_logits.max()).exp()
k = torch.multinomial(probs, 1)[0]
components[grid[:, k]] = 1
edge_ids[0] = k
# Sample edges connecting the cumulative tree to a new leaf.
for e in range(1, E):
c1, c2 = components[grid]
mask = (c1 != c2)
valid_logits = edge_logits[mask]
probs = (valid_logits - valid_logits.max()).exp()
k = mask.nonzero(as_tuple=False)[torch.multinomial(probs, 1)[0]]
components[grid[:, k]] = 1
edge_ids[e] = k
# Convert edge ids to a canonical list of pairs.
edge_ids = edge_ids.sort()[0]
edges = torch.empty((E, 2), dtype=torch.long)
edges[:, 0] = grid[0, edge_ids]
edges[:, 1] = grid[1, edge_ids]
return edges
def sample_tree_approx(edge_logits, backend="python"):
"""
Approximately sample a random spanning tree of a dense weighted graph.
This is mainly useful for initializing an MCMC sampler.
:param torch.Tensor edge_logits: A length-K array of nonnormalized log
probabilities.
:returns: An E x 2 tensor of edges in the form of (vertex,vertex) pairs.
Each edge should be sorted and the entire tensor should be
lexicographically sorted.
:rtype: torch.Tensor
"""
if backend == "python":
return _sample_tree_approx(edge_logits)
elif backend == "cpp":
return _get_cpp_module().sample_tree_approx(edge_logits)
else:
raise ValueError("unknown backend: {}".format(repr(backend)))
def sample_tree(edge_logits, init_edges=None, mcmc_steps=1, backend="python"):
edges = init_edges
if edges is None:
edges = sample_tree_approx(edge_logits, backend=backend)
for step in range(mcmc_steps):
edges = sample_tree_mcmc(edge_logits, edges, backend=backend)
return edges
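# Illustrative sketch (ours): the two-stage pipeline used by sample_tree --
# a cheap approximate draw followed by a few Gibbs refinement sweeps.
def _example_sample_tree(V=5, mcmc_steps=2):
    edge_logits = torch.randn(V * (V - 1) // 2)
    init = sample_tree_approx(edge_logits)       # (V-1, 2) initial tree
    edges = sample_tree_mcmc(edge_logits, init)  # one Gibbs sweep
    # Equivalent one-call form:
    edges = sample_tree(edge_logits, init_edges=init, mcmc_steps=mcmc_steps)
    return edges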
################################################################################
# Enumeration implementation.
################################################################################
# See https://oeis.org/A000272
NUM_SPANNING_TREES = [
1, 1, 1, 3, 16, 125, 1296, 16807, 262144, 4782969, 100000000, 2357947691,
61917364224, 1792160394037, 56693912375296, 1946195068359375,
72057594037927936, 2862423051509815793, 121439531096594251776,
5480386857784802185939,
]
# These topologically distinct sets of trees generate sets of all trees
# under permutation of vertices. See https://oeis.org/A000055
_TREE_GENERATORS = [
[[]],
[[]],
[[(0, 1)]],
[[(0, 1), (0, 2)]],
[
[(0, 1), (0, 2), (0, 3)],
[(0, 1), (1, 2), (2, 3)],
],
[
[(0, 1), (0, 2), (0, 3), (0, 4)],
[(0, 1), (0, 2), (0, 3), (1, 4)],
[(0, 1), (1, 2), (2, 3), (3, 4)],
],
[
[(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)],
[(0, 1), (0, 2), (0, 3), (0, 4), (1, 5)],
[(0, 1), (0, 2), (0, 3), (1, 4), (4, 5)],
[(0, 1), (0, 2), (0, 3), (2, 4), (3, 5)],
[(0, 1), (0, 2), (0, 3), (3, 4), (3, 5)],
[(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)],
],
]
def _permute_tree(perm, tree):
edges = [tuple(sorted([perm[u], perm[v]])) for (u, v) in tree]
edges.sort(key=lambda uv: (uv[1], uv[0]))
return tuple(edges)
def _close_under_permutations(V, tree_generators):
vertices = list(range(V))
trees = []
for tree in tree_generators:
trees.extend(set(_permute_tree(perm, tree)
for perm in itertools.permutations(vertices)))
trees.sort()
return trees
def enumerate_spanning_trees(V):
"""
Compute the set of spanning trees on V vertices.
"""
if V >= len(_TREE_GENERATORS):
raise NotImplementedError(
"enumerate_spanning_trees() is implemented only for trees with up to {} vertices"
.format(len(_TREE_GENERATORS) - 1))
all_trees = _close_under_permutations(V, _TREE_GENERATORS[V])
assert len(all_trees) == NUM_SPANNING_TREES[V]
return all_trees
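# Illustrative check (ours): the enumeration agrees with Cayley's formula
# V**(V-2); e.g. there are 4**2 = 16 labelled spanning trees on 4 vertices.
def _example_enumeration(V=4):
    trees = enumerate_spanning_trees(V)
    assert len(trees) == V ** (V - 2) == NUM_SPANNING_TREES[V]
    return trees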
| [
"torch.zeros",
"torch.Size",
"torch.cholesky",
"torch.stack",
"torch.arange",
"torch.no_grad",
"torch.broadcast_tensors",
"torch.randperm",
"torch.multinomial",
"torch.tensor",
"torch.eye",
"torch.utils.cpp_extension.load",
"torch.empty"
] | 1.6.0 | kashif/pyro | b65b329d8b851c7402acaef9c176a8964caadaf3 |
1.6 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import torch
from pyro.distributions.transforms import Transform
from pyro.distributions import constraints
class OrderedTransform(Transform):
"""
Transforms a real vector into an ordered vector.
Specifically, enforces monotonically increasing order on the last dimension
of a given tensor via the transformation :math:`y_0 = x_0`,
:math:`y_i = x_0 + \\sum_{1 \\le j \\le i} \\exp(x_j)` for :math:`i > 0`
"""
domain = constraints.real_vector
codomain = constraints.ordered_vector
bijective = True
sign = +1
event_dim = 1
def _call(self, x):
z = torch.cat([x[..., :1], x[..., 1:].exp()], dim=-1)
return torch.cumsum(z, dim=-1)
def _inverse(self, y):
x = (y[..., 1:] - y[..., :-1]).log()
return torch.cat([y[..., :1], x], dim=-1)
def log_abs_det_jacobian(self, x, y):
return torch.sum(x[..., 1:], dim=-1)
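# Illustrative round-trip sketch (ours, not part of the original module):
# applying the transform yields a strictly increasing last dimension and the
# inverse recovers the input up to floating-point error.
def _example_ordered_transform():
    t = OrderedTransform()
    x = torch.randn(3, 5)
    y = t(x)                                   # ordered along the last dim
    assert (y[..., 1:] > y[..., :-1]).all()
    x_rec = t.inv(y)
    assert torch.allclose(x, x_rec, atol=1e-5)
    # log|det J| is the sum of the inputs to the exponentiated entries:
    assert torch.allclose(t.log_abs_det_jacobian(x, y), x[..., 1:].sum(-1))
    return y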
| [
"torch.sum",
"torch.cat",
"torch.cumsum"
] | 1.6.0 | kashif/pyro | b65b329d8b851c7402acaef9c176a8964caadaf3 |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
import warnings
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
import mmf
import pytorch_lightning as pl
import torch
from mmf.common.meter import Meter
from mmf.common.registry import registry
from mmf.datasets.iteration_strategies import (
ConstantIterationStrategy,
IterationStrategy,
SizeProportionalIterationStrategy,
)
from mmf.datasets.processors.processors import Processor
from mmf.utils.configuration import Configuration, get_global_config
from mmf.utils.distributed import is_dist_initialized, is_master, is_xla, synchronize
from mmf.utils.general import get_optimizer_parameters
from omegaconf import DictConfig, OmegaConf
try:
import torch_xla.core.xla_model as xm # noqa
import torch_xla.distributed.parallel_loader as xla_pl # noqa
except ImportError:
xm = None
ProcessorDict = Dict[str, Processor]
logger = logging.getLogger(__name__)
def build_config(configuration: Configuration, *args, **kwargs) -> DictConfig:
"""Builder function for config. Freezes the configuration and registers
configuration object and config DictConfig object to registry.
Args:
configuration (Configuration): Configuration object that will be
used to create the config.
Returns:
(DictConfig): A config which is of type omegaconf.DictConfig
"""
configuration.freeze()
config = configuration.get_config()
registry.register("config", config)
registry.register("configuration", configuration)
return config
def build_trainer(config: DictConfig) -> Any:
"""Builder function for creating a trainer class. Trainer class name
is picked from the config.
Args:
config (DictConfig): Configuration that will be used to create
the trainer.
Returns:
(BaseTrainer): A trainer instance
"""
trainer_type = config.training.trainer
trainer_cls = registry.get_trainer_class(trainer_type)
trainer_obj = trainer_cls(config)
return trainer_obj
def build_model(
config: Union[DictConfig, "mmf.models.base_model.BaseModel.Config"]
) -> "mmf.models.base_model.BaseModel":
from mmf.models.base_model import BaseModel
# If it is not an OmegaConf object, create the object
if not isinstance(config, DictConfig) and isinstance(config, BaseModel.Config):
config = OmegaConf.structured(config)
model_name = config.model
model_class = registry.get_model_class(model_name)
if model_class is None:
raise RuntimeError(f"No model registered for name: {model_name}")
model = model_class(config)
if hasattr(model, "build"):
"""Model build involves checkpoint loading
If the checkpoint is not available the underlying
methods try to download it.
Let master build the model (download the checkpoints) while
other ranks wait for the sync message
Once the master has downloaded the checkpoint and built the
model it sends the sync message, completing the synchronization
now other cores can proceed to build the model
using already downloaded checkpoint.
"""
if is_master():
model.load_requirements()
model.build()
synchronize()
else:
synchronize()
model.build()
model.init_losses()
return model
def build_dataset(
dataset_key: str, config=None, dataset_type="train"
) -> torch.utils.data.Dataset:
"""Builder function for creating a dataset. If dataset_key is passed
the dataset is created from default config of the dataset and thus is
disable config even if it is passed. Otherwise, we use MultiDatasetLoader to
build and return an instance of dataset based on the config
Args:
dataset_key (str): Key of dataset to build.
config (DictConfig, optional): Configuration that will be used to create
the dataset. If not passed, dataset's default config will be used.
Defaults to {}.
dataset_type (str, optional): Type of the dataset to build, train|val|test.
Defaults to "train".
Returns:
(torch.utils.data.Dataset): A dataset instance of type torch Dataset
"""
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
from mmf.utils.configuration import load_yaml_with_defaults
datamodule_instance = build_datamodule(dataset_key)
# If config is not provided, we take it from default one
if not config:
config_path = datamodule_instance.config_path()
if config_path is None:
# If config path wasn't defined, send an empty config path
# but don't force dataset to define a config
warnings.warn(
f"Config path not defined for {dataset_key}, "
+ "continuing with empty config"
)
config = OmegaConf.create()
else:
config = load_yaml_with_defaults(config_path)
config = OmegaConf.select(config, f"dataset_config.{dataset_key}")
if config is None:
config = OmegaConf.create()
OmegaConf.set_struct(config, True)
elif dataset_key in config:
# Handle Global config
config = config[dataset_key]
datamodule_instance.build_dataset(config)
dataset = datamodule_instance.load_dataset(config, dataset_type)
if hasattr(datamodule_instance, "update_registry_for_model"):
datamodule_instance.update_registry_for_model(config)
return dataset
# TODO: move dataset_type enum to typings
def build_datasets(
dataset_list: List[str], dataset_config: DictConfig, dataset_type="train"
) -> List[torch.utils.data.Dataset]:
datasets = []
for dataset in dataset_list:
if dataset in dataset_config:
dataset_config = dataset_config[dataset]
else:
warnings.warn(
f"Dataset {dataset} is missing from dataset_config"
+ " in config. Proceeding with empty config."
)
dataset_config = OmegaConf.create()
dataset_instance = build_dataset(dataset, dataset_config, dataset_type)
if dataset_instance is None:
continue
datasets.append(dataset_instance)
return datasets
def build_datamodule(dataset_key) -> pl.LightningDataModule:
dataset_builder = registry.get_builder_class(dataset_key)
assert dataset_builder, (
f"Key {dataset_key} doesn't have a registered " + "dataset builder"
)
builder_instance: pl.LightningDataModule = dataset_builder()
return builder_instance
def build_multiple_datamodules(
dataset_list: List[str], all_dataset_config: DictConfig
) -> Dict[str, pl.LightningDataModule]:
datamodules: Dict[str, pl.LightningDataModule] = {}
for dataset in dataset_list:
datamodule_instance = build_datamodule(dataset)
if dataset in all_dataset_config:
dataset_config = all_dataset_config[dataset]
else:
warnings.warn(
f"Dataset {dataset} is missing from dataset_config"
+ " in config. Proceeding with empty config."
)
dataset_config = OmegaConf.create()
if is_master():
datamodule_instance.prepare_data(dataset_config)
synchronize()
datamodule_instance.setup(config=dataset_config)
if hasattr(datamodule_instance, "update_registry_for_model"):
datamodule_instance.update_registry_for_model(dataset_config)
datamodules[dataset] = datamodule_instance
return datamodules
def build_dataloader_and_sampler(
dataset_instance: torch.utils.data.Dataset, datamodule_config: DictConfig
) -> Tuple[torch.utils.data.DataLoader, Optional[torch.utils.data.Sampler]]:
"""Builds and returns a dataloader along with its sample
Args:
dataset_instance (torch.utils.data.Dataset): Instance of dataset for which
dataloader has to be created
datamodule_config (omegaconf.DictConfig): Datamodule configuration; required
for inferring params for the dataloader
Returns:
Tuple[torch.utils.data.DataLoader, Optional[torch.utils.data.Sampler]]:
Tuple of Dataloader and Sampler instance
"""
from mmf.common.batch_collator import BatchCollator
training_config = get_global_config("training")
# Support params coming in from dataloader params
other_args = {
"num_workers": datamodule_config.get(
"num_workers", training_config.get("num_workers", 4)
),
"pin_memory": datamodule_config.get(
"pin_memory", training_config.get("pin_memory", False)
),
"shuffle": datamodule_config.get("shuffle", None),
"batch_size": datamodule_config.get("batch_size", None),
}
# IterableDataset returns batches directly, so no need to add Sampler
# or batch size as user is expected to control those. This is a fine
# assumption for now to not support single item based IterableDataset
# as it will add unnecessary complexity and config parameters
# to the codebase
if not isinstance(dataset_instance, torch.utils.data.IterableDataset):
other_args = _add_extra_args_for_dataloader(dataset_instance, other_args)
else:
other_args.pop("shuffle")
loader = torch.utils.data.DataLoader(
dataset=dataset_instance,
collate_fn=BatchCollator(
dataset_instance.dataset_name, dataset_instance.dataset_type
),
drop_last=is_xla(), # see also MultiDatasetLoader.__len__
**other_args,
)
if is_xla():
device = xm.xla_device()
loader = xla_pl.MpDeviceLoader(loader, device)
if other_args["num_workers"] >= 0:
# Suppress leaking semaphore warning
os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"
loader.dataset_type = dataset_instance.dataset_type
return loader, other_args.get("sampler", None)
def build_test_reporter(
datamodules: List[pl.LightningDataModule],
config: DictConfig = None,
dataset_type: str = "train",
):
test_reporter_key = "default"
if config:
test_reporter_key = config.get("type", "default")
test_reporter_class = registry.get_test_rerporter_class(test_reporter_key)
assert (
test_reporter_class
), f"Key {test_reporter_key} doesn't have a registered test_reporter class"
if not config:
warnings.warn(
f"Config not provided for {test_reporter_key}, test_reporter"
+ "continuing with empty config"
)
params_config = OmegaConf.create()
else:
params_config = config.params
return test_reporter_class(datamodules, params_config, dataset_type)
def _add_extra_args_for_dataloader(
dataset_instance: torch.utils.data.Dataset, other_args: Dict[str, Any] = None
) -> Dict[str, Any]:
from mmf.utils.general import get_batch_size
dataset_type = dataset_instance.dataset_type
if other_args["shuffle"] is None:
other_args["shuffle"] = False
if dataset_type != "test":
other_args["shuffle"] = True
# In distributed mode, we use DistributedSampler from PyTorch
if is_dist_initialized():
other_args["sampler"] = torch.utils.data.DistributedSampler(
dataset_instance, shuffle=other_args["shuffle"]
)
# Shuffle is mutually exclusive with sampler, let DistributedSampler
# take care of shuffle and pop from main args
other_args.pop("shuffle")
if is_xla():
other_args["sampler"] = torch.utils.data.DistributedSampler(
dataset_instance,
num_replicas=xm.xrt_world_size(),
rank=xm.get_ordinal(),
shuffle=other_args["shuffle"],
)
other_args.pop("shuffle")
if other_args["batch_size"] is None:
other_args["batch_size"] = get_batch_size()
return other_args
def build_optimizer(model, config):
optimizer_config = config.optimizer
if "type" not in optimizer_config:
raise ValueError(
"Optimizer attributes must have a 'type' key "
"specifying the type of optimizer. "
"(Custom or PyTorch, e.g. 'adam_w' or 'SGD')"
)
optimizer_type = optimizer_config.type
if "params" not in optimizer_config:
warnings.warn("optimizer attributes has no params defined, defaulting to {}.")
params = optimizer_config.get("params", {})
if hasattr(torch.optim, optimizer_type):
optimizer_class = getattr(torch.optim, optimizer_type)
else:
optimizer_class = registry.get_optimizer_class(optimizer_type)
if optimizer_class is None:
raise ValueError(
"No optimizer class of type {} present in "
"either torch or registered to registry"
)
parameters = get_optimizer_parameters(model, config)
if optimizer_config.get("enable_state_sharding", False):
# TODO(vedanuj): Remove once OSS is moved to PT upstream
try:
from fairscale.optim.oss import OSS
except ImportError:
print(
"Optimizer state sharding requires fairscale. "
+ "Install using pip install fairscale."
)
raise
assert (
is_dist_initialized()
), "Optimizer state sharding can only be used in distributed mode."
is_fp16 = config.get("training", {}).get("fp16", False)
optimizer = OSS(
params=parameters, optim=optimizer_class, broadcast_fp16=is_fp16, **params
)
else:
optimizer = optimizer_class(parameters, **params)
return optimizer
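# Illustrative sketch (ours) of the config structure consumed by
# build_optimizer: a ``type`` naming either a torch.optim class or a
# registry-registered optimizer, plus optional ``params`` forwarded to it.
# The values below ("Adam", lr=1e-4) are placeholder assumptions; actually
# calling build_optimizer additionally requires an MMF model compatible with
# get_optimizer_parameters.
def _example_optimizer_config():
    config = OmegaConf.create(
        {"optimizer": {"type": "Adam", "params": {"lr": 1e-4}}}
    )
    # build_optimizer(model, config) would resolve torch.optim.Adam here.
    return config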
def build_lightning_optimizers(model, config):
optimizer = build_optimizer(model, config)
if config.training.lr_scheduler:
lr_scheduler = build_scheduler(optimizer, config)
return {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": lr_scheduler, "interval": "step"},
}
else:
return optimizer
def build_scheduler(optimizer, config):
scheduler_config = config.get("scheduler", {})
if "type" not in scheduler_config:
warnings.warn(
"No type for scheduler specified even though lr_scheduler is True, "
"setting default to 'Pythia'"
)
scheduler_type = scheduler_config.get("type", "pythia")
if "params" not in scheduler_config:
warnings.warn("scheduler attributes has no params defined, defaulting to {}.")
params = scheduler_config.get("params", {})
scheduler_class = registry.get_scheduler_class(scheduler_type)
scheduler = scheduler_class(optimizer, **params)
return scheduler
def build_classifier_layer(config, *args, **kwargs):
from mmf.modules.layers import ClassifierLayer
classifier = ClassifierLayer(config.type, *args, **config.params, **kwargs)
return classifier.module
def build_text_encoder(config, *args, **kwargs):
"""Deprecated, please do not use"""
try:
from mmf.modules.fb.encoders import TextEncoderFactory
except ImportError:
from mmf.modules.encoders import TextEncoderFactory
text_encoder = TextEncoderFactory(config, *args, **kwargs)
return text_encoder.module
def build_image_encoder(config, direct_features=False, **kwargs):
"""Deprecated, please do not use"""
from mmf.modules.encoders import ImageEncoderFactory, ImageFeatureEncoderFactory
if direct_features:
module = ImageFeatureEncoderFactory(config)
else:
module = ImageEncoderFactory(config)
return module.module
def build_encoder(config: Union[DictConfig, "mmf.modules.encoders.Encoder.Config"]):
from mmf.modules.encoders import Encoder
# If it is not an OmegaConf object, create the object
if not isinstance(config, DictConfig) and isinstance(config, Encoder.Config):
config = OmegaConf.structured(config)
if "type" in config:
# Support config initialization in form of
# encoder:
# type: identity # noqa
# params:
# in_dim: 256
name = config.type
if isinstance(name, Enum):
name = name.value
params = config.get("params", None)
else:
# Structured Config support
name = config.name
params = config
encoder_cls = registry.get_encoder_class(name)
# If params were not passed, try generating them from encoder
# class's default config
if params is None:
params = OmegaConf.structured(getattr(encoder_cls, "Config", {}))
return encoder_cls(params)
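# Illustrative sketch (ours) of the two config shapes accepted by
# build_encoder: the "type"/"params" form and the structured-config form
# carrying a "name" field. The encoder name "identity" and in_dim value mirror
# the comment above and are placeholders; resolving them still requires the
# encoder to be registered in the MMF registry.
def _example_encoder_configs():
    plain = OmegaConf.create({"type": "identity", "params": {"in_dim": 256}})
    structured = OmegaConf.create({"name": "identity", "in_dim": 256})
    return plain, structured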
def build_processors(
processors_config: DictConfig, registry_key: str = None, *args, **kwargs
) -> ProcessorDict:
"""Given a processor config, builds the processors present and returns back
a dict containing processors mapped to keys as per the config
Args:
processors_config (omegaconf.DictConfig): OmegaConf DictConfig describing
the parameters and type of each processor passed here
registry_key (str, optional): If passed, function would look into registry for
this particular key and return it back. .format with processor_key will
be called on this string. Defaults to None.
Returns:
ProcessorDict: Dictionary containing key to
processor mapping
"""
from mmf.datasets.processors.processors import Processor
processor_dict = {}
for processor_key, processor_params in processors_config.items():
if not processor_params:
continue
processor_instance = None
if registry_key is not None:
full_key = registry_key.format(processor_key)
processor_instance = registry.get(full_key, no_warning=True)
if processor_instance is None:
processor_instance = Processor(processor_params, *args, **kwargs)
# We don't register back here as in case of hub interface, we
# want the processors to be instantiated every time. BaseDataset
# can register at its own end
processor_dict[processor_key] = processor_instance
return processor_dict
def build_iteration_strategy(
config: DictConfig,
dataloaders: Dict[str, torch.utils.data.DataLoader],
*args,
**kwargs,
) -> IterationStrategy:
if not config.get("enabled", True):
return ConstantIterationStrategy.from_params(dataloaders, *args, **kwargs)
else:
assert (
"type" in config
), "multitasking config must define 'type' attribute if enabled"
# This assumes all dataloaders will have same dataset type
iteration_strategy_class = registry.get_iteration_strategy_class(config.type)
config = config.get("params", {})
dataset_type = dataloaders[list(dataloaders.keys())[0]].dataset.dataset_type
if dataset_type != "train":
logger.info(
f"{iteration_strategy_class.__name__} updated to size "
+ f"proportional for {dataset_type}"
)
return SizeProportionalIterationStrategy.from_params(
dataloaders, *args, **kwargs
)
else:
return iteration_strategy_class(config, dataloaders, *args, **kwargs)
def build_meters(run_type: str) -> List[Meter]:
train_meter, val_meter, test_meter = None, None, None
if "train" in run_type:
train_meter = Meter()
# val_meter used for validation after training loop
val_meter = Meter()
elif "val" in run_type or "inference" in run_type:
val_meter = Meter()
if "test" in run_type:
test_meter = Meter()
return train_meter, val_meter, test_meter
| [
"torch.utils.data.DistributedSampler"
] | 1.6.0 | JiesiZhao077/mmf | e20f0d29638c5d05e3e0c385fe67a9bfeef0f921 |
1.5 | import numpy as np
import torch
import torch.nn as nn
import random
import torch.distributions as D
class GenerativeFlow(nn.Module):
"""
Generative flow base class
For models performing density estimation and matching
"""
def __init__(self, args):
super(GenerativeFlow, self).__init__()
self.num_flows = args.num_flows
self.z_size = args.z_size
self.density_evaluation = args.density_evaluation
self.args = args
# base distribution for calculation of log prob under the model
self.register_buffer('base_dist_mean', torch.randn(self.z_size, device=args.device).normal_(0, 0.1))
self.register_buffer('base_dist_var', 3.0 * torch.ones(self.z_size, device=args.device))
# Normalizing flow layers
self.flow_transformation = None
# auxiliary
if args.cuda:
self.FloatTensor = torch.cuda.FloatTensor
else:
self.FloatTensor = torch.FloatTensor
# log-det-jacobian = 0 without flows
self.log_det_j = self.FloatTensor(1).zero_()
@property
def base_dist(self):
#rval = D.MultivariateNormal(self.base_dist_mean, self.base_dist_var)
rval = D.Normal(self.base_dist_mean, self.base_dist_var)
return rval
def forward(self):
raise NotImplementedError
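# Illustrative sketch (ours): the arguments GenerativeFlow expects and how the
# buffered base distribution is used. The Namespace fields below are
# assumptions based only on the attributes read in __init__.
def _example_base_dist():
    from argparse import Namespace
    args = Namespace(num_flows=2, z_size=4, density_evaluation=True,
                     device='cpu', cuda=False)
    flow = GenerativeFlow(args)
    z = flow.base_dist.sample((8,))              # (8, z_size)
    log_pz = flow.base_dist.log_prob(z).sum(-1)  # sum over independent dims
    return z, log_pz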
| [
"torch.distributions.Normal",
"torch.randn",
"torch.ones"
] | 1.5.0 | robert-giaquinto/gradient-boosted-normalizing-flows | eca3726774f4498f1583bb79d4a9b955b4f51412 |
1.3 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GATConv
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.inits import glorot
from torch_geometric.utils import softmax
class HGTConv(MessagePassing):
def __init__(self, in_dim, out_dim, num_types, num_relations, n_heads, dropout=0.2, use_norm=True, use_RTE=True,
**kwargs):
super(HGTConv, self).__init__(node_dim=0, aggr='add', **kwargs)
self.in_dim = in_dim
self.out_dim = out_dim
self.num_types = num_types
self.num_relations = num_relations
self.total_rel = num_types * num_relations * num_types
self.n_heads = n_heads
self.d_k = out_dim // n_heads
self.sqrt_dk = math.sqrt(self.d_k)
self.use_norm = use_norm
self.use_RTE = use_RTE
self.att = None
self.k_linears = nn.ModuleList()
self.q_linears = nn.ModuleList()
self.v_linears = nn.ModuleList()
self.a_linears = nn.ModuleList()
self.norms = nn.ModuleList()
for t in range(num_types):
self.k_linears.append(nn.Linear(in_dim, out_dim))
self.q_linears.append(nn.Linear(in_dim, out_dim))
self.v_linears.append(nn.Linear(in_dim, out_dim))
self.a_linears.append(nn.Linear(out_dim, out_dim))
if use_norm:
self.norms.append(nn.LayerNorm(out_dim))
'''
TODO: make relation_pri smaller, as not all <st, rt, tt> pairs exist in the meta relation list.
'''
self.relation_pri = nn.Parameter(torch.ones(num_relations, self.n_heads))
self.relation_att = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))
self.relation_msg = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))
self.skip = nn.Parameter(torch.ones(num_types))
self.drop = nn.Dropout(dropout)
if self.use_RTE:
self.emb = RelTemporalEncoding(in_dim)
glorot(self.relation_att)
glorot(self.relation_msg)
def forward(self, node_inp, node_type, edge_index, edge_type, edge_time):
return self.propagate(edge_index, node_inp=node_inp, node_type=node_type, \
edge_type=edge_type, edge_time=edge_time)
def message(self, edge_index_i, node_inp_i, node_inp_j, node_type_i, node_type_j, edge_type, edge_time):
'''
j: source, i: target; <j, i>
'''
data_size = edge_index_i.size(0)
'''
Create Attention and Message tensor beforehand.
'''
res_att = torch.zeros(data_size, self.n_heads).to(node_inp_i.device)
res_msg = torch.zeros(data_size, self.n_heads, self.d_k).to(node_inp_i.device)
for source_type in range(self.num_types):
sb = (node_type_j == int(source_type))
k_linear = self.k_linears[source_type]
v_linear = self.v_linears[source_type]
for target_type in range(self.num_types):
tb = (node_type_i == int(target_type)) & sb
q_linear = self.q_linears[target_type]
for relation_type in range(self.num_relations):
'''
idx is all the edges with meta relation <source_type, relation_type, target_type>
'''
idx = (edge_type == int(relation_type)) & tb
if idx.sum() == 0:
continue
'''
Get the corresponding input node representations by idx.
Add temporal encoding to source representation (j)
'''
target_node_vec = node_inp_i[idx]
source_node_vec = node_inp_j[idx]
if self.use_RTE:
source_node_vec = self.emb(source_node_vec, edge_time[idx])
'''
Step 1: Heterogeneous Mutual Attention
'''
q_mat = q_linear(target_node_vec).view(-1, self.n_heads, self.d_k)
k_mat = k_linear(source_node_vec).view(-1, self.n_heads, self.d_k)
k_mat = torch.bmm(k_mat.transpose(1, 0), self.relation_att[relation_type]).transpose(1, 0)
res_att[idx] = (q_mat * k_mat).sum(dim=-1) * self.relation_pri[relation_type] / self.sqrt_dk
'''
Step 2: Heterogeneous Message Passing
'''
v_mat = v_linear(source_node_vec).view(-1, self.n_heads, self.d_k)
res_msg[idx] = torch.bmm(v_mat.transpose(1, 0), self.relation_msg[relation_type]).transpose(1, 0)
'''
Softmax based on target node's id (edge_index_i). Store attention value in self.att for later visualization.
'''
self.att = softmax(res_att, edge_index_i)
res = res_msg * self.att.view(-1, self.n_heads, 1)
del res_att, res_msg
return res.view(-1, self.out_dim)
def update(self, aggr_out, node_inp, node_type):
'''
Step 3: Target-specific Aggregation
x = W[node_type] * gelu(Agg(x)) + x
'''
aggr_out = F.gelu(aggr_out)
res = torch.zeros(aggr_out.size(0), self.out_dim).to(node_inp.device)
for target_type in range(self.num_types):
idx = (node_type == int(target_type))
if idx.sum() == 0:
continue
trans_out = self.drop(self.a_linears[target_type](aggr_out[idx]))
'''
Add skip connection with learnable weight self.skip[t_id]
'''
alpha = torch.sigmoid(self.skip[target_type])
if self.use_norm:
res[idx] = self.norms[target_type](trans_out * alpha + node_inp[idx] * (1 - alpha))
else:
res[idx] = trans_out * alpha + node_inp[idx] * (1 - alpha)
return res
def __repr__(self):
return '{}(in_dim={}, out_dim={}, num_types={}, num_relations={})'.format(
self.__class__.__name__, self.in_dim, self.out_dim,
self.num_types, self.num_relations)
class DenseHGTConv(MessagePassing):
def __init__(self, in_dim, out_dim, num_types, num_relations, n_heads, dropout=0.2, use_norm=True, use_RTE=True,
**kwargs):
super(DenseHGTConv, self).__init__(node_dim=0, aggr='add', **kwargs)
self.in_dim = in_dim
self.out_dim = out_dim
self.num_types = num_types
self.num_relations = num_relations
self.total_rel = num_types * num_relations * num_types
self.n_heads = n_heads
self.d_k = out_dim // n_heads
self.sqrt_dk = math.sqrt(self.d_k)
self.use_norm = use_norm
self.use_RTE = use_RTE
self.att = None
self.k_linears = nn.ModuleList()
self.q_linears = nn.ModuleList()
self.v_linears = nn.ModuleList()
self.a_linears = nn.ModuleList()
self.norms = nn.ModuleList()
for t in range(num_types):
self.k_linears.append(nn.Linear(in_dim, out_dim))
self.q_linears.append(nn.Linear(in_dim, out_dim))
self.v_linears.append(nn.Linear(in_dim, out_dim))
self.a_linears.append(nn.Linear(out_dim, out_dim))
if use_norm:
self.norms.append(nn.LayerNorm(out_dim))
'''
TODO: make relation_pri smaller, as not all <st, rt, tt> pairs exist in the meta relation list.
'''
self.relation_pri = nn.Parameter(torch.ones(num_relations, self.n_heads))
self.relation_att = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))
self.relation_msg = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))
self.drop = nn.Dropout(dropout)
if self.use_RTE:
self.emb = RelTemporalEncoding(in_dim)
glorot(self.relation_att)
glorot(self.relation_msg)
self.mid_linear = nn.Linear(out_dim, out_dim * 2)
self.out_linear = nn.Linear(out_dim * 2, out_dim)
self.out_norm = nn.LayerNorm(out_dim)
def forward(self, node_inp, node_type, edge_index, edge_type, edge_time):
return self.propagate(edge_index, node_inp=node_inp, node_type=node_type, \
edge_type=edge_type, edge_time=edge_time)
def message(self, edge_index_i, node_inp_i, node_inp_j, node_type_i, node_type_j, edge_type, edge_time):
'''
j: source, i: target; <j, i>
'''
data_size = edge_index_i.size(0)
'''
Create Attention and Message tensor beforehand.
'''
res_att = torch.zeros(data_size, self.n_heads).to(node_inp_i.device)
res_msg = torch.zeros(data_size, self.n_heads, self.d_k).to(node_inp_i.device)
for source_type in range(self.num_types):
sb = (node_type_j == int(source_type))
k_linear = self.k_linears[source_type]
v_linear = self.v_linears[source_type]
for target_type in range(self.num_types):
tb = (node_type_i == int(target_type)) & sb
q_linear = self.q_linears[target_type]
for relation_type in range(self.num_relations):
'''
idx is all the edges with meta relation <source_type, relation_type, target_type>
'''
idx = (edge_type == int(relation_type)) & tb
if idx.sum() == 0:
continue
'''
Get the corresponding input node representations by idx.
Add temporal encoding to source representation (j)
'''
target_node_vec = node_inp_i[idx]
source_node_vec = node_inp_j[idx]
if self.use_RTE:
source_node_vec = self.emb(source_node_vec, edge_time[idx])
'''
Step 1: Heterogeneous Mutual Attention
'''
q_mat = q_linear(target_node_vec).view(-1, self.n_heads, self.d_k)
k_mat = k_linear(source_node_vec).view(-1, self.n_heads, self.d_k)
k_mat = torch.bmm(k_mat.transpose(1, 0), self.relation_att[relation_type]).transpose(1, 0)
res_att[idx] = (q_mat * k_mat).sum(dim=-1) * self.relation_pri[relation_type] / self.sqrt_dk
'''
Step 2: Heterogeneous Message Passing
'''
v_mat = v_linear(source_node_vec).view(-1, self.n_heads, self.d_k)
res_msg[idx] = torch.bmm(v_mat.transpose(1, 0), self.relation_msg[relation_type]).transpose(1, 0)
'''
Softmax based on target node's id (edge_index_i). Store attention value in self.att for later visualization.
'''
self.att = softmax(res_att, edge_index_i)
res = res_msg * self.att.view(-1, self.n_heads, 1)
del res_att, res_msg
return res.view(-1, self.out_dim)
def update(self, aggr_out, node_inp, node_type):
'''
Step 3: Target-specific Aggregation
x = W[node_type] * Agg(x) + x
'''
res = torch.zeros(aggr_out.size(0), self.out_dim).to(node_inp.device)
for target_type in range(self.num_types):
idx = (node_type == int(target_type))
if idx.sum() == 0:
continue
trans_out = self.drop(self.a_linears[target_type](aggr_out[idx])) + node_inp[idx]
'''
Add skip connection with learnable weight self.skip[t_id]
'''
if self.use_norm:
trans_out = self.norms[target_type](trans_out)
'''
Step 4: Shared Dense Layer
x = Out_L(gelu(Mid_L(x))) + x
'''
trans_out = self.drop(self.out_linear(F.gelu(self.mid_linear(trans_out)))) + trans_out
res[idx] = self.out_norm(trans_out)
return res
def __repr__(self):
return '{}(in_dim={}, out_dim={}, num_types={}, num_relations={})'.format(
self.__class__.__name__, self.in_dim, self.out_dim,
self.num_types, self.num_relations)
class RelTemporalEncoding(nn.Module):
'''
Implement the Temporal Encoding (Sinusoid) function.
'''
def __init__(self, n_hid, max_len=240, dropout=0.2):
super(RelTemporalEncoding, self).__init__()
position = torch.arange(0., max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, n_hid, 2) *
-(math.log(10000.0) / n_hid))
emb = nn.Embedding(max_len, n_hid)
emb.weight.data[:, 0::2] = torch.sin(position * div_term) / math.sqrt(n_hid)
emb.weight.data[:, 1::2] = torch.cos(position * div_term) / math.sqrt(n_hid)
emb.requires_grad = False
self.emb = emb
self.lin = nn.Linear(n_hid, n_hid)
def forward(self, x, t):
return x + self.lin(self.emb(t))
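# Illustrative sketch (ours): each relative time id in [0, max_len) is looked
# up in the frozen sinusoid table, projected by a linear layer, and added to
# the node representation.
def _example_rte():
    rte = RelTemporalEncoding(n_hid=16)
    x = torch.randn(5, 16)           # source node representations
    t = torch.randint(0, 240, (5,))  # edge times, LongTensor
    out = rte(x, t)                  # same shape as x: (5, 16)
    return out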
class GeneralConv(nn.Module):
def __init__(self, conv_name, in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm=True,
use_RTE=True):
super(GeneralConv, self).__init__()
self.conv_name = conv_name
if self.conv_name == 'hgt':
self.base_conv = HGTConv(in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm, use_RTE)
elif self.conv_name == 'dense_hgt':
self.base_conv = DenseHGTConv(in_hid, out_hid, num_types, num_relations, n_heads, dropout, use_norm,
use_RTE)
elif self.conv_name == 'gcn':
self.base_conv = GCNConv(in_hid, out_hid)
elif self.conv_name == 'gat':
self.base_conv = GATConv(in_hid, out_hid // n_heads, heads=n_heads)
def forward(self, meta_xs, node_type, edge_index, edge_type, edge_time):
if self.conv_name == 'hgt':
return self.base_conv(meta_xs, node_type, edge_index, edge_type, edge_time)
elif self.conv_name == 'gcn':
return self.base_conv(meta_xs, edge_index)
elif self.conv_name == 'gat':
return self.base_conv(meta_xs, edge_index)
elif self.conv_name == 'dense_hgt':
return self.base_conv(meta_xs, node_type, edge_index, edge_type, edge_time)
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cos",
"torch.zeros",
"torch.nn.ModuleList",
"torch.sin",
"torch.arange",
"torch.nn.functional.gelu",
"torch.ones",
"torch.Tensor",
"torch.nn.Embedding"
] | 1.3.0 | Suchun-sv/pyHGT | 49fb66e04386835d9dc3ba22abba121f8a960469 |
1.7 | import json
from functools import namedtuple
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from omegaconf import DictConfig
from cpc.dataset import AudioRawDataset
class ConvNetEncoder(nn.Module):
def __init__(self, hidden_size=512):
super().__init__()
self.hidden_size = hidden_size
self.conv1 = nn.Conv1d(1, hidden_size, kernel_size=10, stride=5, padding=3)
self.bnorm1 = nn.BatchNorm1d(hidden_size)
self.conv2 = nn.Conv1d(hidden_size, hidden_size, kernel_size=8, stride=4, padding=2)
self.bnorm2 = nn.BatchNorm1d(hidden_size)
self.conv3 = nn.Conv1d(hidden_size, hidden_size, kernel_size=4, stride=2, padding=1)
self.bnorm3 = nn.BatchNorm1d(hidden_size)
self.conv4 = nn.Conv1d(hidden_size, hidden_size, kernel_size=4, stride=2, padding=1)
self.bnorm4 = nn.BatchNorm1d(hidden_size)
self.conv5 = nn.Conv1d(hidden_size, hidden_size, kernel_size=4, stride=2, padding=1)
self.bnorm5 = nn.BatchNorm1d(hidden_size)
@property
def input_port(self):
return (
('audio_signal', ('B', 'C', 'T')),
)
@property
def output_port(self):
return (
('encoder_embedding', ('B', 'T', 'C')),
)
def forward(self, x):
x = F.relu(self.bnorm1(self.conv1(x)))
x = F.relu(self.bnorm2(self.conv2(x)))
x = F.relu(self.bnorm3(self.conv3(x)))
x = F.relu(self.bnorm4(self.conv4(x)))
x = F.relu(self.bnorm5(self.conv5(x)))
x = x.transpose(1, 2) # Reminder: make the channel last
return x
class GRUAutoRegressiveModel(nn.Module):
def __init__(self, embedding_size=512, hidden_size=256, keep_hidden=False):
super().__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.keep_hidden = keep_hidden
self.hidden = None
self.rnn = nn.GRU(self.embedding_size, hidden_size=self.hidden_size, num_layers=1, batch_first=True)
def input_port(self):
return (
('encoder_embedding', ('B', 'T', 'C')),
)
def output_port(self):
return (
('ar_embedding', ('B', 'T', 'C')),
)
def forward(self, x):
x, h = self.rnn(x, self.hidden) # (batch, seq_len, hidden_size)
if self.keep_hidden:
self.hidden = h.detach()
return x
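# Illustrative shape sketch (ours): the five strided convolutions downsample
# raw audio by a factor of 5*4*2*2*2 = 160, so a (B, 1, 20480) waveform yields
# 128 encoded steps, which the GRU then summarizes causally.
def _example_cpc_shapes():
    encoder = ConvNetEncoder(hidden_size=512)
    ar = GRUAutoRegressiveModel(embedding_size=512, hidden_size=256)
    audio = torch.randn(2, 1, 20480)  # (batch, channel, samples)
    z = encoder(audio)                # (2, 128, 512)
    c = ar(z)                         # (2, 128, 256)
    return z.shape, c.shape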
class LinearPredictionModel(nn.Module):
def __init__(self, ar_embedding_size=256, enc_embedding_size=512):
super().__init__()
self.ar_embedding_size = ar_embedding_size
self.linear = nn.Linear(ar_embedding_size, enc_embedding_size, bias=False)
def forward(self, x):
x = self.linear(x)
return x
class CPCCriterion(nn.Module):
def __init__(self, ar_embedding_size=256, enc_embedding_size=512, n_predictions=12, n_negs=8):
super().__init__()
self.ar_embedding_size = ar_embedding_size
self.enc_embedding_size = enc_embedding_size
self.n_predictions = n_predictions # max number of steps for prediction horizon
self.n_negs = n_negs # number of negative samples to be sampled
self.loss_function = nn.CrossEntropyLoss() # reminder: mean reduction by default
self.predictors = nn.ModuleList()
for _ in range(self.n_predictions):
self.predictors.append(
LinearPredictionModel(self.ar_embedding_size, self.enc_embedding_size)
)
@property
def input_port(self):
return (
('encoder_embedding', ('B', 'T', 'C')),
('ar_embedding', ('B', 'T', 'C')),
)
@property
def output_port(self):
return (
('loss', ('N',)),
('acc', ('N',))
)
def get_random_samples(self, z_features, window_size):
samples = []
batch_size, steps, z_dim = z_features.size()
# randomly sample n_negs * batch_size for each step
z_neg = z_features.contiguous().view(-1, z_dim)
sample_idx = torch.randint(low=0, high=batch_size*steps, size=(batch_size*self.n_negs*window_size,), device=z_features.device)
z_neg = z_neg[sample_idx].view(batch_size, self.n_negs, window_size, z_dim)
labels = torch.zeros(size=(batch_size*window_size,), dtype=torch.long, device=z_features.device)
for k in range(1, self.n_predictions + 1):
z_pos = z_features[:, k:k+window_size].unsqueeze(1)
sample = torch.cat([z_pos, z_neg], dim=1)
samples.append(sample)
return samples, labels
def forward(self, c_features, z_features, window_size):
c_features = c_features[:, :window_size]
samples, labels = self.get_random_samples(z_features, window_size)
losses = []
accs = []
for k in range(self.n_predictions):
z_pred = self.predictors[k](c_features)
z_pred = z_pred.unsqueeze(1)
prediction = (z_pred * samples[k]).sum(dim=3)
prediction = prediction.permute(0, 2, 1)
prediction = prediction.contiguous().view(-1, prediction.size(2))
#####################################################
# accuracy calculation, direct copy from: facebook/cpc_audio
_, pred_index = prediction.max(1)
acc = torch.sum(pred_index == labels).float()
#####################################################
loss = self.loss_function(prediction, labels)
losses.append(loss.view(1, -1))
accs.append(acc.view(1, -1))
return torch.cat(losses, dim=1), torch.cat(accs, dim=1) / labels.size(0)
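# Illustrative sketch (ours): feeding random features through the criterion.
# With 128 encoded steps and n_predictions=12, the prediction window can be at
# most 128 - 12 = 116 steps; one (loss, accuracy) pair is returned per horizon.
def _example_cpc_criterion():
    criterion = CPCCriterion(ar_embedding_size=256, enc_embedding_size=512)
    z = torch.randn(2, 128, 512)      # encoder features
    c = torch.randn(2, 128, 256)      # autoregressive context
    losses, accs = criterion(c, z, window_size=116)
    return losses.shape, accs.shape   # both (1, 12)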
class CPCAudioRawModel(pl.LightningModule):
def __init__(self, cfg: DictConfig):
super().__init__()
self.window_size = cfg.window_size // cfg.downsampling # number of steps in encoded space
self.encoder = ConvNetEncoder(**cfg.encoder)
self.ar = GRUAutoRegressiveModel(**cfg.ar)
self.cpc_criterion = CPCCriterion(**cfg.cpc_criterion)
self._train_dataset = None
self._validation_dataset = None
self._train_dataloader = None
self._val_dataloader = None
self._optimizers = None
self.setup_train_dataloader(cfg.train_data)
self.setup_val_dataloader(cfg.validation_data)
self.setup_optimizers(cfg.optim)
# this will save cfg as hparams to ckpts and tensorboard
self.hparams = cfg
def setup_optimizers(self, optim_cfg: DictConfig):
self._optimizers = Adam(self.parameters(), **optim_cfg)
def setup_train_dataloader(self, train_data_cfg: DictConfig):
self._train_dataset = AudioRawDataset(
**train_data_cfg.dataset
)
self._train_dataloader = DataLoader(
self._train_dataset,
collate_fn=self._train_dataset.collate_fn,
**train_data_cfg.dataloader
)
def setup_val_dataloader(self, validation_data_cfg: DictConfig):
self._validation_dataset = AudioRawDataset(
**validation_data_cfg.dataset
)
self._val_dataloader = DataLoader(
self._validation_dataset,
collate_fn=self._validation_dataset.collate_fn,
**validation_data_cfg.dataloader
)
def forward(self, audio_signal):
z_features = self.encoder(audio_signal)
c_features = self.ar(z_features)
return z_features, c_features
def training_step(self, batch, batch_idx):
"""
batch: audio_signals; tensor (B, C, L)
"""
z_features, c_features = self(batch)
# c_features = c_features[:, :self.window_size]
# random_samples = self.get_random_samples(z_features)
losses, accs = self.cpc_criterion(c_features, z_features, self.window_size)
total_loss = losses.sum(dim=1)
aver_acc = accs.mean(dim=1)
self.log('train_aver_acc', aver_acc, on_step=True, prog_bar=True, logger=True)
self.log('train_loss', total_loss, on_step=True, prog_bar=True, logger=True)
return total_loss
def validation_step(self, batch, batch_idx):
"""
batch: audio_signals; tensor (B, C, L)
"""
z_features, c_features = self(batch)
# c_features = c_features[:, :self.window_size]
# random_samples = self.get_random_samples(z_features)
losses, accs = self.cpc_criterion(c_features, z_features, self.window_size)
total_loss = losses.sum(dim=1)
aver_acc = accs.mean(dim=1)
self.log('val_aver_acc', aver_acc, on_step=True, prog_bar=True, logger=True)
self.log('val_loss', total_loss, on_step=True, prog_bar=True, logger=True)
return total_loss
def train_dataloader(self):
if self._train_dataloader:
return self._train_dataloader
else:
raise AttributeError('Please setup_train_dataloader() first')
def val_dataloader(self):
if self._val_dataloader:
return self._val_dataloader
else:
raise AttributeError('Please setup_val_dataloader() first')
def configure_optimizers(self):
if self._optimizers:
return self._optimizers
else:
raise AttributeError('Please setup_optimizers() first')
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.GRU",
"torch.nn.ModuleList",
"torch.nn.Conv1d",
"torch.randint",
"torch.nn.BatchNorm1d",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss",
"torch.sum"
] | 1.7.0 | lokhiufung/quick-and-dirty-dl | f2e4429451543e9e9a44ed5304e268cf3c2aa888 |
1.8 | r"""Hodgkin-Huxley (HH) benchmark.
HH is a widespread non-linear mechanistic model of neural dynamics.
References:
A quantitative description of membrane current and its application to conduction and excitation in nerve
(Hodgkin et al., 1952)
https://link.springer.com/article/10.1007/BF02459568
Training deep neural density estimators to identify mechanistic models of neural dynamics
(Gonçalves et al., 2020)
https://elifesciences.org/articles/56261
Shapes:
theta: :math:`(8,)`.
x: :math:`(7,)`.
"""
import numpy as np
import torch
from numpy import ndarray as Array
from torch import Tensor, BoolTensor
from typing import *
from . import Simulator
LABELS = [
f'${l}$' for l in [
r'g_{\mathrm{Na}}', r'g_{\mathrm{K}}', r'g_{\mathrm{M}}', 'g_l',
r'\tau_{\max}', 'V_t', r'\sigma', 'E_l',
]
]
LOWER, UPPER = torch.tensor([
[0.5, 80.], # g_Na [mS/cm^2]
[1e-4, 15.], # g_K [mS/cm^2]
[1e-4, .6], # g_M [mS/cm^2]
[1e-4, .6], # g_l [mS/cm^2]
[50., 3000.], # tau_max [ms]
[-90., -40.], # V_t [mV]
[1e-4, .15], # sigma [uA/cm^2]
[-100., -35.], # E_l [mV]
]).t()
class HH(Simulator):
r"""Creates an Hodgkin-Huxley (HH) simulator.
Arguments:
summary: Whether voltage traces are converted to summary statistics or not.
seed: A random number generator seed.
        kwargs: Simulator settings and constants (e.g. duration, initial voltage, ...).
"""
def __init__(self, summary: bool = True, seed: int = None, **kwargs):
super().__init__()
# Constants
default = {
            'duration': 80.,  # ms
            'time_step': 0.02,  # ms
            'padding': 10.,  # ms
'initial_voltage': -70., # mV
'current': 5e-4 / (np.pi * 7e-3 ** 2), # uA / cm^2
}
self.constants = {
k: kwargs.get(k, v)
for k, v in default.items()
}
# Summary statistics
self.summary = summary
# RNG
self.rng = np.random.default_rng(seed)
def __call__(self, theta: Array) -> Array:
x = voltage_trace(theta, self.constants, self.rng)
if self.summary:
x = summarize(x, self.constants)
return x
def voltage_trace(
theta: Array,
constants: Dict[str, float],
rng: np.random.Generator,
) -> Array:
r"""Simulates an Hodgkin-Huxley voltage trace.
References:
https://github.com/mackelab/sbi/blob/main/examples/HH_helper_functions.py
"""
# Parameters
T = constants['duration']
dt = constants['time_step']
pad = constants['padding']
V_0 = constants['initial_voltage']
I = constants['current']
theta = np.expand_dims(theta, axis=0)
g_Na, g_K, g_M, g_leak, tau_max, V_t, sigma, E_leak = [
theta[..., i] for i in range(8)
]
C = 1. # uF/cm^2
E_Na = 53. # mV
E_K = -107. # mV
# Kinetics
exp = np.exp
efun = lambda x: np.where(
np.abs(x) < 1e-4,
1 - x / 2,
x / (exp(x) - 1)
)
alpha_n = lambda x: 0.032 * efun(-0.2 * (x - 15)) / 0.2
beta_n = lambda x: 0.5 * exp(-(x - 10) / 40)
tau_n = lambda x: 1 / (alpha_n(x) + beta_n(x))
n_inf = lambda x: alpha_n(x) / (alpha_n(x) + beta_n(x))
alpha_m = lambda x: 0.32 * efun(-0.25 * (x - 13)) / 0.25
beta_m = lambda x: 0.28 * efun(0.2 * (x - 40)) / 0.2
tau_m = lambda x: 1 / (alpha_m(x) + beta_m(x))
m_inf = lambda x: alpha_m(x) / (alpha_m(x) + beta_m(x))
alpha_h = lambda x: 0.128 * exp(-(x - 17) / 18)
beta_h = lambda x: 4 / (1 + exp(-0.2 * (x - 40)))
tau_h = lambda x: 1 / (alpha_h(x) + beta_h(x))
h_inf = lambda x: alpha_h(x) / (alpha_h(x) + beta_h(x))
tau_p = lambda x: tau_max / (3.3 * exp(0.05 * (x + 35)) + exp(-0.05 * (x + 35)))
p_inf = lambda x: 1 / (1 + exp(-0.1 * (x + 35)))
# Iterations
voltages = []
V = np.full_like(V_t, V_0)
V_rel = V - V_t
n = n_inf(V_rel)
m = m_inf(V_rel)
h = h_inf(V_rel)
p = p_inf(V)
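    # Exponential Euler integration: for frozen coefficients, each state y with
    # steady state y_inf(V) and time constant tau_y(V) evolves exactly as
    #     y <- y_inf + (y - y_inf) * exp(-dt / tau_y),
    # and the membrane potential V is updated the same way via V_inf and tau_V.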
for t in np.arange(0, T, dt):
tau_V = C / (
g_Na * m**3 * h
+ g_K * n**4
+ g_M * p
+ g_leak
)
V_inf = tau_V * (
E_Na * g_Na * m**3 * h
+ E_K * g_K * n**4
+ E_K * g_M * p
+ E_leak * g_leak
+ I * (pad <= t < T - pad)
+ sigma * rng.standard_normal(V.shape) / dt**0.5
) / C
V = V_inf + (V - V_inf) * exp(-dt / tau_V)
V_rel = V - V_t
n = n_inf(V_rel) + (n - n_inf(V_rel)) * exp(-dt / tau_n(V_rel))
m = m_inf(V_rel) + (m - m_inf(V_rel)) * exp(-dt / tau_m(V_rel))
h = h_inf(V_rel) + (h - h_inf(V_rel)) * exp(-dt / tau_h(V_rel))
p = p_inf(V) + (p - p_inf(V)) * exp(-dt / tau_p(V))
voltages.append(V)
return np.stack(voltages, axis=-1).squeeze(axis=0)
def summarize(x: Array, constants: Dict[str, float]) -> Array:
r"""Returns summary statistics of a voltage trace."""
# Constants
T = constants['duration']
dt = constants['time_step']
pad = constants['padding']
t = np.arange(0, T, dt)
# Number of spikes
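    # (clip the trace at -10 mV, then count local maxima of the clipped trace,
    # i.e. peaks that rise above the -10 mV threshold)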
spikes = np.maximum(x, -10)
spikes = np.diff(np.sign(np.diff(spikes)))
spikes = np.sum(spikes < 0, axis=-1)
# Resting moments
rest = x[..., (pad / 2 <= t) * (t < pad)]
rest_mean = np.mean(rest, axis=-1)
rest_std = np.std(rest, axis=-1)
# Moments
x = x[..., (pad <= t) * (t < T - pad)]
x_mean = np.mean(x, axis=-1)
x_std = np.std(x, axis=-1)
z = (x - x_mean[..., None]) / x_std[..., None]
x_skew = np.mean(z**3, axis=-1)
x_kurtosis = np.mean(z**4, axis=-1)
return np.stack([
spikes,
rest_mean, rest_std,
x_mean, x_std, x_skew, x_kurtosis,
], axis=-1)
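# A minimal usage sketch; `_example_run` is a hypothetical helper (not part of the
# benchmark API) that draws one parameter vector uniformly from the LOWER/UPPER box
# above and simulates it, returning the 7 summary statistics.
def _example_run(seed: int = 0) -> Array:
    theta = (LOWER + (UPPER - LOWER) * torch.rand(8)).numpy()
    return HH(summary=True, seed=seed)(theta)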
| [
"torch.tensor"
] | 1.8.0 | francois-rozet/lampe | 50e53c767ee5d98502ec8520b3bca554f2169eb7 |
1.1 | """
Implementation of non-negative matrix factorization for GPU
"""
from datetime import datetime
from nimfa.methods.seeding import nndsvd
import numpy as np
import torch
import torch.nn
from torch import nn
class NMF:
def __init__(self, V, rank, max_iterations=100000, tolerance=1e-8, test_conv=1000, gpu_id=0, seed=None,
init_method='random', floating_point_precision='double', min_iterations=2000):
"""
Run non-negative matrix factorisation using GPU. Uses beta-divergence.
Args:
V: Matrix to be factorised
            rank: (int) number of latent dimensions to use in the factorisation
max_iterations: (int) Maximum number of update iterations to use during fitting
tolerance: tolerance to use in convergence tests. Lower numbers give longer times to convergence
            test_conv: (int) How often (in iterations) to test for convergence
gpu_id: (int) Which GPU device to use
seed: random seed, if None (default) datetime is used
init_method: how to initialise basis and coefficient matrices, options are:
- random (will always be the same if seed != None)
- NNDSVD
- NNDSVDa (fill in the zero elements with the average),
- NNDSVDar (fill in the zero elements with random values in the space [0:average/100]).
floating_point_precision: (string or type). Can be `double`, `float` or any type/string which
torch can interpret.
min_iterations: the minimum number of iterations to execute before termination. Useful when using
fp32 tensors as convergence can happen too early.
"""
torch.cuda.set_device(gpu_id)
if seed is None:
seed = datetime.now().timestamp()
if floating_point_precision == 'float':
self._tensor_type = torch.FloatTensor
elif floating_point_precision == 'double':
self._tensor_type = torch.DoubleTensor
else:
self._tensor_type = floating_point_precision
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.max_iterations = max_iterations
self.min_iterations = min_iterations
# If V is not in a batch, put it in a batch of 1
if len(V.shape) == 2:
V = V[None, :, :]
self._V = V.type(self._tensor_type).cuda()
self._fix_neg = nn.Threshold(0., 1e-8)
self._tolerance = tolerance
self._prev_loss = None
self._iter = 0
self._test_conv = test_conv
self._gpu_id = gpu_id
self._rank = rank
self._W, self._H = self._initialise_wh(init_method)
def _initialise_wh(self, init_method):
"""
Initialise basis and coefficient matrices according to `init_method`
"""
if init_method == 'random':
W = torch.rand(self._V.shape[0], self._V.shape[1], self._rank).type(self._tensor_type).cuda()
H = torch.rand(self._V.shape[0], self._rank, self._V.shape[2]).type(self._tensor_type).cuda()
return W, H
elif init_method == 'NNDSVD':
nv = nndsvd.Nndsvd()
vin = np.mat(self._V.cpu().numpy())
W, H = nv.initialize(vin, self._rank, options={'flag': 0})
elif init_method == 'NNDSVDa':
nv = nndsvd.Nndsvd()
vin = np.mat(self._V.cpu().numpy())
W, H = nv.initialize(vin, self._rank, options={'flag': 1})
elif init_method == 'NNDSVDar':
nv = nndsvd.Nndsvd()
vin = np.mat(self._V.cpu().numpy())
W, H = nv.initialize(vin, self._rank, options={'flag': 2})
W = torch.from_numpy(W).type(self._tensor_type).cuda(self._gpu_id)
H = torch.from_numpy(H).type(self._tensor_type).cuda(self._gpu_id)
return W, H
@property
def reconstruction(self):
return self.W @ self.H
@property
def W(self):
return self._W
@property
def H(self):
return self._H
@property
def _kl_loss(self):
return (self._V * (self._V / self.reconstruction).log()).sum() - self._V.sum() + self.reconstruction.sum()
@property
def _loss_converged(self):
"""
Check if loss has converged
"""
if not self._iter:
self._loss_init = self._kl_loss
elif ((self._prev_loss - self._kl_loss) / self._loss_init) < self._tolerance:
return True
self._prev_loss = self._kl_loss
return False
def fit(self, beta=1):
"""
Fit the basis (W) and coefficient (H) matrices to the input matrix (V) using multiplicative updates and
beta divergence
Args:
beta: value to use for generalised beta divergence. Default is 1 for KL divergence
beta == 2 => Euclidean updates
beta == 1 => Generalised Kullback-Leibler updates
beta == 0 => Itakura-Saito updates
"""
with torch.no_grad():
def stop_iterations():
stop = (self._V.shape[0] == 1) and \
(self._iter % self._test_conv == 0) and \
self._loss_converged and \
(self._iter > self.min_iterations)
if stop:
print("loss converged with {} iterations".format(self._iter))
return stop
if beta == 2:
for self._iter in range(self.max_iterations):
                    # W and H are read-only properties, so update the underlying tensors directly.
                    self._H = self.H * (self.W.transpose(1, 2) @ self._V) / (self.W.transpose(1, 2) @ (self.W @ self.H))
                    self._W = self.W * (self._V @ self.H.transpose(1, 2)) / (self.W @ (self.H @ self.H.transpose(1, 2)))
if stop_iterations():
break
# Optimisations for the (common) beta=1 (KL) case.
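            # Multiplicative KL (beta=1) updates, with `ones` standing in for the
            # all-ones matrix:  W <- W * ((V / (W H)) H^T) / (1 H^T)  and
            #                   H <- H * (W^T (V / (W H))) / (W^T 1).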
elif beta == 1:
ones = torch.ones(self._V.shape).type(self._tensor_type).cuda(self._gpu_id)
for self._iter in range(self.max_iterations):
ht = self.H.transpose(1, 2)
numerator = (self._V / (self.W @ self.H)) @ ht
                    denominator = ones @ ht
                    self._W *= numerator / denominator
wt = self.W.transpose(1, 2)
numerator = wt @ (self._V / (self.W @ self.H))
                    denominator = wt @ ones
                    self._H *= numerator / denominator
if stop_iterations():
break
else:
for self._iter in range(self.max_iterations):
                    # As above, assign to the underlying tensors rather than the read-only properties.
                    self._H = self.H * ((self.W.transpose(1, 2) @ (((self.W @ self.H) ** (beta - 2)) * self._V)) /
                                        (self.W.transpose(1, 2) @ ((self.W @ self.H) ** (beta - 1))))
                    self._W = self.W * ((((self.W @ self.H) ** (beta - 2) * self._V) @ self.H.transpose(1, 2)) /
                                        (((self.W @ self.H) ** (beta - 1)) @ self.H.transpose(1, 2)))
if stop_iterations():
break
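# Minimal usage sketch (hypothetical example; requires a CUDA device and the nimfa
# package imported above): factorise a random non-negative matrix into rank-5
# factors with the KL (beta=1) multiplicative updates.
if __name__ == '__main__':
    V = torch.rand(200, 100)
    model = NMF(V, rank=5, max_iterations=20000)
    model.fit(beta=1)
    print(model.W.shape, model.H.shape, model.reconstruction.shape)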
| [
"torch.rand",
"torch.cuda.manual_seed",
"torch.no_grad",
"torch.nn.Threshold",
"torch.from_numpy",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.ones"
] | 1.1.0 | edawson/SigProfilerExtractor | a9cc43ddba2271ec68ff4d1a6a93399386713325 |
1.0 | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ALBERT model. """
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_albert import AlbertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "AlbertConfig"
_TOKENIZER_FOR_DOC = "AlbertTokenizer"
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"albert-base-v1",
"albert-large-v1",
"albert-xlarge-v1",
"albert-xxlarge-v1",
"albert-base-v2",
"albert-large-v2",
"albert-xlarge-v2",
"albert-xxlarge-v2",
# See all ALBERT models at https://huggingface.co/models?filter=albert
]
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
print(name)
for name, array in zip(names, arrays):
original_name = name
# If saved from the TF HUB module
name = name.replace("module/", "")
# Renaming and simplifying
name = name.replace("ffn_1", "ffn")
name = name.replace("bert/", "albert/")
name = name.replace("attention_1", "attention")
name = name.replace("transform/", "")
name = name.replace("LayerNorm_1", "full_layer_layer_norm")
name = name.replace("LayerNorm", "attention/LayerNorm")
name = name.replace("transformer/", "")
# The feed forward layer had an 'intermediate' step which has been abstracted away
name = name.replace("intermediate/dense/", "")
name = name.replace("ffn/intermediate/output/dense/", "ffn_output/")
# ALBERT attention was split between self and output which have been abstracted away
name = name.replace("/output/", "/")
name = name.replace("/self/", "/")
# The pooler is a linear layer
name = name.replace("pooler/dense", "pooler")
# The classifier was simplified to predictions from cls/predictions
name = name.replace("cls/predictions", "predictions")
name = name.replace("predictions/attention", "predictions")
# Naming was changed to be more explicit
name = name.replace("embeddings/attention", "embeddings")
name = name.replace("inner_group_", "albert_layers/")
name = name.replace("group_", "albert_layer_groups/")
# Classifier
if len(name.split("/")) == 1 and ("output_bias" in name or "output_weights" in name):
name = "classifier/" + name
# No ALBERT model currently handles the next sentence prediction task
if "seq_relationship" in name:
name = name.replace("seq_relationship/output_", "sop_classifier/classifier/")
name = name.replace("weights", "weight")
name = name.split("/")
# Ignore the gradients applied by the LAMB/ADAM optimizers.
if (
"adam_m" in name
or "adam_v" in name
or "AdamWeightDecayOptimizer" in name
or "AdamWeightDecayOptimizer_1" in name
or "global_step" in name
):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print("Initialize PyTorch weight {} from {}".format(name, original_name))
pointer.data = torch.from_numpy(array)
return model
class AlbertEmbeddings(nn.Module):
"""
Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class AlbertAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pruned_heads = set()
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
# Update hyper params and store pruned heads
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = self.attention_head_size * self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_ids, attention_mask=None, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(input_ids)
mixed_key_layer = self.key(input_ids)
mixed_value_layer = self.value(input_ids)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.attention_dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
# Should find a better way to do this
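        # context_layer is (batch, seq_len, num_heads, head_size) and w reshapes the
        # dense weight to (num_heads, head_size, hidden_size); the einsum below
        # contracts over the head dimensions, i.e. it merges the heads and applies
        # the output projection in one step.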
w = (
self.dense.weight.t()
.view(self.num_attention_heads, self.attention_head_size, self.hidden_size)
.to(context_layer.dtype)
)
b = self.dense.bias.to(context_layer.dtype)
projected_context_layer = torch.einsum("bfnd,ndh->bfh", context_layer, w) + b
projected_context_layer_dropout = self.output_dropout(projected_context_layer)
layernormed_context_layer = self.LayerNorm(input_ids + projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)
class AlbertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = AlbertAttention(config)
self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False
):
attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
ffn_output = apply_chunking_to_forward(
self.ff_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output[0],
)
hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])
return (hidden_states,) + attention_output[1:] # add attentions if we output them
def ff_chunk(self, attention_output):
ffn_output = self.ffn(attention_output)
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(ffn_output)
return ffn_output
class AlbertLayerGroup(nn.Module):
def __init__(self, config):
super().__init__()
self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
def forward(
self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False
):
layer_hidden_states = ()
layer_attentions = ()
for layer_index, albert_layer in enumerate(self.albert_layers):
layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)
hidden_states = layer_output[0]
if output_attentions:
layer_attentions = layer_attentions + (layer_output[1],)
if output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if output_hidden_states:
outputs = outputs + (layer_hidden_states,)
if output_attentions:
outputs = outputs + (layer_attentions,)
return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
class AlbertTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_hidden_states = (hidden_states,) if output_hidden_states else None
all_attentions = () if output_attentions else None
for i in range(self.config.num_hidden_layers):
# Number of layers in a hidden group
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
# Index of the hidden group
group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
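            # e.g. with the default num_hidden_layers=12 and num_hidden_groups=1,
            # every iteration reuses group_idx == 0, which is what implements
            # ALBERT's cross-layer parameter sharing.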
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states,
attention_mask,
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
output_attentions,
output_hidden_states,
)
hidden_states = layer_group_output[0]
if output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class AlbertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = AlbertConfig
base_model_prefix = "albert"
authorized_missing_keys = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class AlbertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.AlbertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
sop_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
sop_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
ALBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Args:
config (:class:`~transformers.AlbertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ALBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.AlbertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare ALBERT Model transformer outputting raw hidden-states without any specific head on top.",
ALBERT_START_DOCSTRING,
)
class AlbertModel(AlbertPreTrainedModel):
config_class = AlbertConfig
load_tf_weights = load_tf_weights_in_albert
base_model_prefix = "albert"
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = AlbertEmbeddings(config)
self.encoder = AlbertTransformer(config)
if add_pooling_layer:
self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
self.pooler_activation = nn.Tanh()
else:
self.pooler = None
self.pooler_activation = None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ALBERT has
        a different architecture in that its layers are shared across groups, which in turn contain inner groups. If an ALBERT
model has 12 hidden layers and 2 hidden groups, with two inner groups, there is a total of 4 different layers.
        These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden group,
        while [2,3] correspond to the two inner groups of the second hidden group.
        Any layer with an index other than [0,1,2,3] will result in an error. See base class PreTrainedModel for more
information about head pruning
"""
for layer, heads in heads_to_prune.items():
group_idx = int(layer / self.config.inner_group_num)
inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=BaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model with two heads on top as done during the pre-training: a `masked language modeling` head and a
`sentence order prediction (classification)` head.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForPreTraining(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config)
self.predictions = AlbertMLMHead(config)
self.sop_classifier = AlbertSOPHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.predictions.decoder
def get_input_embeddings(self):
return self.albert.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
sentence_order_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
sentence_order_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``. ``0`` indicates original order (sequence
A, then sequence B), ``1`` indicates switched order (sequence B, then sequence A).
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Example::
>>> from transformers import AlbertTokenizer, AlbertForPreTraining
>>> import torch
>>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
>>> model = AlbertForPreTraining.from_pretrained('albert-base-v2')
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_logits = outputs.prediction_logits
>>> sop_logits = outputs.sop_logits
"""
if "masked_lm_labels" in kwargs:
warnings.warn(
"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("masked_lm_labels")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores = self.predictions(sequence_output)
sop_scores = self.sop_classifier(pooled_output)
total_loss = None
if labels is not None and sentence_order_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))
total_loss = masked_lm_loss + sentence_order_loss
if not return_dict:
output = (prediction_scores, sop_scores) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return AlbertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
sop_logits=sop_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class AlbertMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.LayerNorm = nn.LayerNorm(config.embedding_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
self.activation = ACT2FN[config.hidden_act]
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states)
prediction_scores = hidden_states
return prediction_scores
class AlbertSOPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, pooled_output):
dropout_pooled_output = self.dropout(pooled_output)
logits = self.classifier(dropout_pooled_output)
return logits
@add_start_docstrings(
"Albert Model with a `language modeling` head on top.",
ALBERT_START_DOCSTRING,
)
class AlbertForMaskedLM(AlbertPreTrainedModel):
authorized_unexpected_keys = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config, add_pooling_layer=False)
self.predictions = AlbertMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.predictions.decoder
def get_input_embeddings(self):
return self.albert.embeddings.word_embeddings
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
"""
if "masked_lm_labels" in kwargs:
warnings.warn(
"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("masked_lm_labels")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_outputs = outputs[0]
prediction_scores = self.predictions(sequence_outputs)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForSequenceClassification(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ...,
config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForTokenClassification(AlbertPreTrainedModel):
authorized_unexpected_keys = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
    Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ALBERT_START_DOCSTRING,
)
class AlbertForQuestionAnswering(AlbertPreTrainedModel):
authorized_unexpected_keys = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.albert = AlbertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.albert(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ALBERT_START_DOCSTRING,
)
class AlbertForMultipleChoice(AlbertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where `num_choices` is the size of the second dimension of the input tensors. (see
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
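# A minimal masked-LM usage sketch (hypothetical, mirroring the AlbertForPreTraining
# docstring example above; it assumes the pretrained "albert-base-v2" checkpoint can
# be downloaded):
#
#     from transformers import AlbertTokenizer, AlbertForMaskedLM
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     model = AlbertForMaskedLM.from_pretrained("albert-base-v2")
#     inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
#     logits = model(**inputs).logits  # (1, sequence_length, config.vocab_size)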
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.MSELoss",
"torch.nn.Softmax",
"torch.einsum",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.arange",
"torch.from_numpy",
"torch.ones",
"torch.matmul",
"torch.nn.Embedding"
] | 1.0 | ceostroff/transformers | 3095ee9dab739f212a8753b5be4e1a72ba42e28e |
1.0 | # coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch REFORMER model. """
import sys
from collections import namedtuple
from dataclasses import dataclass
from functools import reduce
from operator import mul
from typing import List, Optional, Tuple
import numpy as np
import torch
from torch import nn
from torch.autograd.function import Function
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput
from ...modeling_utils import PreTrainedModel, apply_chunking_to_forward
from ...utils import logging
from .configuration_reformer import ReformerConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "ReformerConfig"
_TOKENIZER_FOR_DOC = "ReformerTokenizer"
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/reformer-crime-and-punishment",
"google/reformer-enwik8",
# See all Reformer models at https://huggingface.co/models?filter=reformer
]
# Define named tuples for nn.Modules here
LSHSelfAttentionOutput = namedtuple("LSHSelfAttentionOutput", ["hidden_states", "attention_probs", "buckets"])
LocalSelfAttentionOutput = namedtuple("LocalSelfAttentionOutput", ["hidden_states", "attention_probs"])
AttentionOutput = namedtuple("AttentionOutput", ["hidden_states", "attention_probs", "buckets"])
ReformerOutput = namedtuple("ReformerOutput", ["hidden_states", "attn_output", "attention_probs", "buckets"])
ReformerBackwardOutput = namedtuple(
"ReformerBackwardOutput", ["attn_output", "hidden_states", "grad_attn_output", "grad_hidden_states"]
)
ReformerEncoderOutput = namedtuple(
"ReformerEncoderOutput",
["hidden_states", "all_hidden_states", "all_attentions", "past_buckets_states"],
)
def _stable_argsort(vector, dim):
# this function scales the vector so that torch.argsort is stable.
# torch.argsort is not stable on its own
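    # Scaling by the length and adding the (mod-length) position index keeps the
    # relative order of distinct bucket ids while breaking ties between equal ids
    # in favour of the lower index, which emulates a stable sort.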
scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1)
scale_offset = scale_offset.expand(vector.shape)
scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim])
return torch.argsort(scaled_vector, dim=dim)
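# --- Editorial sketch (illustrative addition, not part of the original file) ---
# How the scaling in `_stable_argsort` breaks ties: for integer-valued inputs (the bucket ids
# hashed below), multiplying by the length of the sorted dimension and adding the position
# index makes equal entries sort by their original position. Hypothetical helper, shown only
# to document the trick.
def _example_stable_argsort():
    buckets = torch.tensor([[[2, 0, 2, 1, 0]]])      # shape (1, 1, 5) with ties at values 0 and 2
    stable_order = _stable_argsort(buckets, dim=-1)  # tensor([[[1, 4, 3, 0, 2]]]); ties keep order
    return stable_order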
def _get_least_common_mult_chunk_len(config):
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]):
return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {}. Select attn layer types from ['lsh', 'local'] only.".format(
config.attn_layers
)
)
def _get_min_chunk_len(config):
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]):
return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {}. Select attn layer types from ['lsh', 'local'] only.".format(
config.attn_layers
)
)
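# --- Editorial sketch (illustrative addition, not part of the original file) ---
# The two helpers above only read `attn_layers`, `lsh_attn_chunk_length` and
# `local_attn_chunk_length`, so a minimal stand-in config is enough to exercise them.
def _example_chunk_length_helpers():
    from types import SimpleNamespace
    toy_config = SimpleNamespace(
        attn_layers=["local", "lsh", "local", "lsh"],
        lsh_attn_chunk_length=64,
        local_attn_chunk_length=96,
    )
    least_common_mult = _get_least_common_mult_chunk_len(toy_config)  # np.lcm(64, 96) == 192
    min_chunk_len = _get_min_chunk_len(toy_config)                    # min(64, 96) == 64
    return least_common_mult, min_chunk_len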
class AxialPositionEmbeddings(nn.Module):
"""
Constructs axial position embeddings. Useful for very long input sequences to save memory and time.
"""
def __init__(self, config):
super().__init__()
self.axial_pos_shape = config.axial_pos_shape
self.axial_pos_embds_dim = config.axial_pos_embds_dim
self.dropout = config.hidden_dropout_prob
self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config)
self.weights = nn.ParameterList()
assert (
sum(self.axial_pos_embds_dim) == config.hidden_size
), "Make sure that config.axial_pos_embds factors: {} sum to config.hidden_size: {}".format(
self.axial_pos_embds_dim, config.hidden_size
)
# create weights
for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim):
# create expanded shapes
ax_shape = [1] * len(self.axial_pos_shape)
ax_shape[axis] = self.axial_pos_shape[axis]
ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,)
# create tensor and init
self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32)))
def forward(self, position_ids):
# broadcast weights to correct shape
batch_size = position_ids.shape[0]
sequence_length = position_ids.shape[1]
broadcasted_weights = [
weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights
]
if self.training is True:
assert (
reduce(mul, self.axial_pos_shape) == sequence_length
), "If training, make sure that config.axial_pos_shape factors: {} multiply to sequence length. Got prod({}) != sequence_length: {}. You might want to consider padding your sequence length to {} or changing config.axial_pos_shape.".format(
self.axial_pos_shape, self.axial_pos_shape, sequence_length, reduce(mul, self.axial_pos_shape)
)
if self.dropout > 0:
weights = torch.cat(broadcasted_weights, dim=-1)
# permute weights so that 2D correctly drops dims 1 and 2
transposed_weights = weights.transpose(2, 1)
# drop entire matrix of last two dims (prev dims 1 and 2)
dropped_transposed_weights = nn.functional.dropout2d(
transposed_weights, p=self.dropout, training=self.training
)
dropped_weights = dropped_transposed_weights.transpose(2, 1)
position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1))
else:
position_encodings = torch.cat(
[torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights],
dim=-1,
)
else:
assert (
reduce(mul, self.axial_pos_shape) >= sequence_length
), "Make sure that config.axial_pos_shape factors: {} multiply at least to max(sequence_length, least_common_mult_chunk_length): max({}, {})".format(
self.axial_pos_shape,
sequence_length,
self.least_common_mult_chunk_length,
)
# compute how many columns are needed
max_position_id = position_ids.max().item()
required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1])
# cut to columns that are needed
position_encodings = torch.cat(
[weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1
)
position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1]))
# select correct position encodings
position_encodings = torch.cat(
[
torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0)
for i in range(batch_size)
],
dim=0,
)
return position_encodings
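# --- Editorial sketch (illustrative addition, not part of the original file) ---
# What the axial factorization buys: with axial_pos_shape = (d1, d2) and
# axial_pos_embds_dim = (h1, h2), the module stores d1 * h1 + d2 * h2 position parameters
# instead of a dense (d1 * d2) x (h1 + h2) table, while the broadcast-and-concat in `forward`
# still produces one (h1 + h2)-dimensional vector per position. The sizes below are made up
# for illustration.
def _example_axial_param_count(axial_pos_shape=(128, 512), axial_pos_embds_dim=(256, 768)):
    d1, d2 = axial_pos_shape
    h1, h2 = axial_pos_embds_dim
    axial_params = d1 * h1 + d2 * h2         # 128 * 256 + 512 * 768 = 425,984
    dense_params = (d1 * d2) * (h1 + h2)     # 65,536 * 1,024 = 67,108,864
    return axial_params, dense_params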
class PositionEmbeddings(nn.Module):
"""Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`."""
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)
def forward(self, position_ids):
position_embeddings = self.embedding(position_ids)
position_embeddings = nn.functional.dropout(position_embeddings, p=self.dropout, training=self.training)
return position_embeddings
class ReformerEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.max_position_embeddings = config.max_position_embeddings
self.dropout = config.hidden_dropout_prob
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = (
AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config)
)
def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0):
if input_ids is not None:
input_shape = input_ids.size()
device = input_ids.device
else:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
seq_length = input_shape[1]
if position_ids is None:
position_ids = torch.arange(
start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
assert (
position_ids.shape[-1] <= self.max_position_embeddings
), "Sequence Length: {} has to be larger equal than config.max_position_embeddings: {}".format(
position_ids.shape[-1], self.max_position_embeddings
)
# dropout
embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training)
# add positional embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
return embeddings
class EfficientAttentionMixin:
"""
A few utilities for nn.Modules in Reformer, to be used as a mixin.
"""
def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):
"""
Used to implement attention between consecutive chunks.
Args:
vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]
num_chunks_before: chunks before current chunk to include in attention
num_chunks_after: chunks after current chunk to include in attention
Returns:
            tensor of shape [batch_size, num_attention_heads, num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after).
"""
if num_chunks_before == 0 and num_chunks_after == 0:
return vectors
slices = []
for i in range(-num_chunks_before, num_chunks_after + 1):
if i == 0:
slices.append(vectors)
else:
slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2))
return torch.cat(slices, dim=3)
def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size):
"""
splits hidden_size dim into attn_head_size and num_attn_heads
"""
new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size)
x = x.view(*new_x_shape)
return x.transpose(2, 1)
def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size):
"""
merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
x = x.permute(0, 2, 1, 3)
return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size))
def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None):
"""
splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims
"""
batch_size = vectors.shape[0]
split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2)
if len(vectors.shape) == 4:
return torch.reshape(vectors, split_dim_shape + (attn_head_size,))
elif len(vectors.shape) == 3:
return torch.reshape(vectors, split_dim_shape)
else:
raise ValueError("Input vector rank should be one of [3, 4], but is: {}".format(len(vectors.shape)))
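# --- Editorial sketch (illustrative addition, not part of the original file) ---
# A quick shape check of the chunking helpers above, assuming batch=2, heads=3, seq_len=8,
# head_size=4 and chunk_length=4. The mixin has no state, so it can be instantiated directly.
def _example_chunk_shapes():
    mixin = EfficientAttentionMixin()
    vectors = torch.randn(2, 3, 8, 4)                                   # (B, H, T, D)
    chunked = mixin._split_seq_length_dim_to(vectors, -1, 4, 3, 4)      # (B, H, T // 4, 4, D)
    assert chunked.shape == (2, 3, 2, 4, 4)
    with_neighbors = mixin._look_adjacent(chunked, 1, 0)                # previous chunk is prepended along the chunk-length axis
    assert with_neighbors.shape == (2, 3, 2, 8, 4)                      # chunk_len * (1 + 1 + 0)
    return with_neighbors.shape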
class LSHSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, config):
super().__init__()
self.config = config
self.chunk_length = config.lsh_attn_chunk_length
self.num_hashes = config.num_hashes
self.num_buckets = config.num_buckets
self.num_chunks_before = config.lsh_num_chunks_before
self.num_chunks_after = config.lsh_num_chunks_after
self.hash_seed = config.hash_seed
self.is_decoder = config.is_decoder
self.max_position_embeddings = config.max_position_embeddings
self.dropout = config.lsh_attention_probs_dropout_prob
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = config.attention_head_size
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.hidden_size = config.hidden_size
# projection matrices
self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
# save mask value here. Need fp32 and fp16 mask values
self.register_buffer("self_mask_value_float16", torch.tensor(-1e3))
self.register_buffer("self_mask_value_float32", torch.tensor(-1e5))
self.register_buffer("mask_value_float16", torch.tensor(-1e4))
self.register_buffer("mask_value_float32", torch.tensor(-1e9))
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
buckets=None,
past_buckets_states=None,
use_cache=False,
output_attentions=False,
**kwargs
):
sequence_length = hidden_states.shape[1]
batch_size = hidden_states.shape[0]
# num hashes can optionally be overwritten by user
num_hashes = num_hashes if num_hashes is not None else self.num_hashes
do_cached_attention = use_cache and past_buckets_states[1] is not None
# check if cache shall be used and that hidden states are already cached
if do_cached_attention:
assert (
sequence_length == 1
), f"At the moment, auto-regressive language generation is only possible one word at a time. Make sure that input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed."
past_buckets = past_buckets_states[0]
past_states = past_buckets_states[1]
# get query vector
query_vectors = self.query_key(hidden_states)
query_vectors = self._split_hidden_size_dim(
query_vectors, self.num_attention_heads, self.attention_head_size
)
if past_buckets is not None:
key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets(
query_vectors=query_vectors,
attention_mask=attention_mask,
num_hashes=num_hashes,
hidden_states=hidden_states,
past_states=past_states,
past_buckets=past_buckets,
)
query_key_vectors = self._query_per_attn_head(key_value_hidden_states)
value_vectors = self._value_per_attn_head(key_value_hidden_states)
# split key & value vectors by num hashes to apply
# self attention on each separately
query_key_vectors = self._split_seq_length_dim_to(
query_key_vectors,
num_hashes,
-1,
self.num_attention_heads,
self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors,
num_hashes,
-1,
self.num_attention_heads,
self.attention_head_size,
)
# repeat query vectors across hash dimension
query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1)
else:
key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1)
query_key_vectors = self.query_key(key_value_hidden_states)
value_vectors = self.value(key_value_hidden_states)
else:
# project hidden_states to query_key and value
query_vectors = None
query_key_vectors = self.query_key(hidden_states)
value_vectors = self.value(hidden_states)
# if query key is not already split
if not do_cached_attention or past_buckets is None:
query_key_vectors = self._split_hidden_size_dim(
query_key_vectors, self.num_attention_heads, self.attention_head_size
)
value_vectors = self._split_hidden_size_dim(
value_vectors, self.num_attention_heads, self.attention_head_size
)
# cache buckets for next incremental decoding
if do_cached_attention and past_buckets is None and key_value_hidden_states.shape[1] >= self.chunk_length:
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
# free memory
del hidden_states
assert (
query_key_vectors.shape[-1] == self.attention_head_size
), "last dim of query_key_vectors is {} but should be {}.".format(
query_key_vectors.shape[-1], self.attention_head_size
)
assert (
value_vectors.shape[-1] == self.attention_head_size
), "last dim of value_vectors is {} but should be {}.".format(
value_vectors.shape[-1], self.attention_head_size
)
do_standard_self_attention = (sequence_length <= self.chunk_length) or (
use_cache and past_buckets_states[1] is not None
)
# LSH attention only makes sense if chunked attention should be performed
if not do_standard_self_attention:
# set `num_buckets` on the fly, recommended way to do it
if self.num_buckets is None:
self._set_num_buckets(sequence_length)
# use cached buckets for backprop only
if buckets is None:
# hash query key vectors into buckets
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
else:
# make sure buckets has correct shape for LSH attention
buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length)
assert (
int(buckets.shape[-1]) == num_hashes * sequence_length
), "last dim of buckets is {}, but should be {}".format(buckets.shape[-1], num_hashes * sequence_length)
sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(
sequence_length, buckets, num_hashes
)
            # make sure bucket idx is not longer than sequence length
sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length
# cluster query key value vectors according to hashed buckets
query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes)
value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes)
query_key_vectors = self._split_seq_length_dim_to(
query_key_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
if self.chunk_length is None:
assert (
self.num_chunks_before == 0 and self.num_chunks_after == 0
), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0."
elif do_cached_attention and past_buckets is not None:
# use max sequence length
sorted_bucket_idx_per_hash = sorted_bucket_idx
else:
# get sequence length indices
sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat(
batch_size, self.num_attention_heads, 1
)
# scale key vectors
key_vectors = self._len_and_dim_norm(query_key_vectors)
# set query_vectors to query key vectors if LSH self attention
query_vectors = query_vectors if query_vectors is not None else query_key_vectors
# free memory
del query_key_vectors
# get attention probs
out_vectors, logits, attention_probs = self._attend(
query_vectors=query_vectors,
key_vectors=key_vectors,
value_vectors=value_vectors,
sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash,
attention_mask=attention_mask,
head_mask=head_mask,
do_standard_self_attention=do_standard_self_attention,
do_cached_attention=do_cached_attention,
)
# free memory
del key_vectors, value_vectors
# re-order out_vectors and logits
if not do_standard_self_attention:
# sort clusters back to correct ordering
out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)
if not do_standard_self_attention or (do_cached_attention and past_buckets is not None):
# sum up all hash rounds
if num_hashes > 1:
out_vectors = self._split_seq_length_dim_to(
out_vectors,
num_hashes,
sequence_length,
self.num_attention_heads,
self.attention_head_size,
)
logits = self._split_seq_length_dim_to(
logits,
num_hashes,
sequence_length,
self.num_attention_heads,
self.attention_head_size,
).unsqueeze(-1)
probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))
out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)
# free memory
del probs_vectors
# free memory
del logits
assert out_vectors.shape == (
batch_size,
self.num_attention_heads,
sequence_length,
self.attention_head_size,
), "out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length, config.attention_head_size]`."
out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
if output_attentions is False:
attention_probs = ()
if buckets is not None:
buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1)
return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets)
def _query_per_attn_head(self, hidden_states):
per_head_query_key = self.query_key.weight.reshape(
self.num_attention_heads, self.attention_head_size, self.hidden_size
).transpose(-2, -1)
# only relevant for inference and no bias => we can use einsum here
query_key_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_query_key)
return query_key_vectors
def _value_per_attn_head(self, hidden_states):
per_head_value = self.value.weight.reshape(
self.num_attention_heads, self.attention_head_size, self.hidden_size
).transpose(-2, -1)
# only relevant for inference and no bias => we can use einsum here
value_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_value)
return value_vectors
def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False):
batch_size = vectors.shape[0]
# See https://arxiv.org/pdf/1509.02897.pdf
# We sample a different random rotation for each round of hashing to
# decrease the probability of hash misses.
if isinstance(self.num_buckets, int):
assert (
self.num_buckets % 2 == 0
), "There should be an even number of bucktes, but `self.num_bucktes`: {}".format(self.num_buckets)
rotation_size = self.num_buckets
num_buckets = self.num_buckets
else:
# Factorize the hash if self.num_buckets is a list or tuple
rotation_size, num_buckets = 0, 1
for bucket_factor in self.num_buckets:
                assert bucket_factor % 2 == 0, "The number of buckets should be even, but got bucket factor: {}".format(
bucket_factor
)
rotation_size = rotation_size + bucket_factor
num_buckets = num_buckets * bucket_factor
# remove gradient
vectors = vectors.detach()
if self.hash_seed is not None:
# for determinism
torch.manual_seed(self.hash_seed)
rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2)
        # create random rotations of shape (num_attention_heads, vector_dim, num_hashes, rotation_size // 2)
random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype)
# Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2
rotated_vectors = torch.einsum("bmtd,mdhr->bmhtr", vectors, random_rotations)
if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1:
rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1)
buckets = torch.argmax(rotated_vectors, dim=-1)
else:
# Get the buckets for them and combine.
buckets, cur_sum, cur_product = None, 0, 1
for bucket_factor in self.num_buckets:
rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)]
cur_sum = cur_sum + bucket_factor // 2
rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1)
if buckets is None:
buckets = torch.argmax(rotated_vectors_factor, dim=-1)
else:
buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1))
cur_product = cur_product * bucket_factor
if attention_mask is not None and (attention_mask.sum().item() < batch_size * attention_mask.shape[-1]):
# add an extra bucket for padding tokens only
num_buckets = num_buckets + 1
# assign padding tokens extra bucket
buckets_mask = attention_mask.to(torch.uint8)[:, None, None, :].expand(buckets.shape)
buckets = torch.where(
buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device)
)
elif increase_num_buckets:
num_buckets = num_buckets + 1
# buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len).
# Next we add offsets so that bucket numbers from different hashing rounds don't overlap.
offsets = torch.arange(num_hashes, device=vectors.device)
offsets = (offsets * num_buckets).view((1, 1, -1, 1))
# expand to batch size and num attention heads
offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:])
offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3)
return offset_buckets
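    # --- Editorial sketch (illustrative addition, not part of the original class) ---
    # The angular LSH step above in isolation, following https://arxiv.org/pdf/1509.02897.pdf:
    # a vector is rotated with a random matrix and bucketed by the argmax over [xR, -xR], so
    # that nearby vectors tend to land in the same bucket. Shapes are simplified to
    # (seq_len, dim) here; the method itself also handles heads, hashes and factorized buckets.
    @staticmethod
    def _example_angular_lsh(num_buckets=8, dim=16, seq_len=32):
        torch.manual_seed(0)                                  # deterministic, mirrors `hash_seed`
        vectors = torch.randn(seq_len, dim)
        random_rotations = torch.randn(dim, num_buckets // 2)
        rotated_vectors = vectors @ random_rotations          # (seq_len, num_buckets // 2)
        buckets = torch.argmax(torch.cat([rotated_vectors, -rotated_vectors], dim=-1), dim=-1)
        return buckets                                        # values in [0, num_buckets)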
def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes):
# no gradients are needed
with torch.no_grad():
# hash-based sort
sorted_bucket_idx = _stable_argsort(buckets, dim=-1)
# create simple indices to scatter to, to have undo sort
indices = (
torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device)
.view(1, 1, -1)
.expand(sorted_bucket_idx.shape)
)
# get undo sort
undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size())
undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices)
return sorted_bucket_idx, undo_sorted_bucket_idx
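    # --- Editorial sketch (illustrative addition, not part of the original class) ---
    # Why the scatter above yields an "undo" permutation: scattering `arange` into the
    # positions given by the sort indices produces the inverse permutation, so gathering
    # with it restores the pre-sort order.
    @staticmethod
    def _example_undo_sort():
        buckets = torch.tensor([[3, 1, 2, 0]])
        sorted_idx = torch.argsort(buckets, dim=-1)                        # [[3, 1, 2, 0]]
        undo_idx = torch.empty_like(sorted_idx)
        undo_idx.scatter_(-1, sorted_idx, torch.arange(4).view(1, -1))
        sorted_buckets = torch.gather(buckets, -1, sorted_idx)             # [[0, 1, 2, 3]]
        restored = torch.gather(sorted_buckets, -1, undo_idx)
        assert torch.equal(restored, buckets)
        return undo_idx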
def _set_num_buckets(self, sequence_length):
# `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper
num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1
# make sure buckets are power of 2
num_buckets = 2 ** num_buckets_pow_2
# factorize `num_buckets` if `num_buckets` becomes too large
num_buckets_limit = 2 * max(
int((self.max_position_embeddings // self.chunk_length) ** (0.5)),
self.chunk_length,
)
if num_buckets > num_buckets_limit:
num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)]
logger.warning("config.num_buckets is not set. Setting config.num_buckets to {}...".format(num_buckets))
# set num buckets in config to be properly saved
self.config.num_buckets = num_buckets
self.num_buckets = num_buckets
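    # --- Editorial sketch (illustrative addition, not part of the original class) ---
    # The bucket-count heuristic above, worked through for hypothetical sizes: with
    # sequence_length=4096, chunk_length=64 and max_position_embeddings=65536 it keeps a
    # single power of two (128); only larger values trigger the two-factor list.
    @staticmethod
    def _example_num_buckets(sequence_length=4096, chunk_length=64, max_position_embeddings=65536):
        num_buckets_pow_2 = (2 * (sequence_length // chunk_length)).bit_length() - 1    # 7
        num_buckets = 2 ** num_buckets_pow_2                                            # 128
        num_buckets_limit = 2 * max(int((max_position_embeddings // chunk_length) ** 0.5), chunk_length)
        if num_buckets > num_buckets_limit:                                             # 128 > 128 -> False here
            num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)]
        return num_buckets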
def _attend(
self,
query_vectors,
key_vectors,
value_vectors,
sorted_bucket_idx_per_hash,
attention_mask,
head_mask,
do_standard_self_attention,
do_cached_attention,
):
# look at previous and following chunks if chunked attention
if not do_standard_self_attention:
key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
# get logits and dots
# (BS, NumAttn, NumHash x NumChunk, Chunk_L x Hidden),(BS, NumAttn, NumHash x NumChunk, Chunk_L * (Num_bef + Num_aft + 1) x Hidden) -> (BS, NumAttn, NumHash x NumChunk, Chunk_L, Chunk_L * (1 + Num_bef + Num_aft))
query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
# free memory
del query_vectors, key_vectors
# if chunked attention split bucket idxs to query and key
if not do_standard_self_attention:
query_bucket_idx = self._split_seq_length_dim_to(
sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads
)
key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after)
elif do_cached_attention and query_key_dots.ndim > 4:
key_value_bucket_idx = sorted_bucket_idx_per_hash
query_bucket_idx = (
key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max()
)
elif do_cached_attention and query_key_dots.ndim <= 4:
query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1]
key_value_bucket_idx = torch.arange(
query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device
)[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,))
else:
query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash
# get correct mask values depending on precision
if query_key_dots.dtype == torch.float16:
self_mask_value = self.self_mask_value_float16.half()
mask_value = self.mask_value_float16.half()
else:
self_mask_value = self.self_mask_value_float32
mask_value = self.mask_value_float32
if not do_cached_attention:
mask = self._compute_attn_mask(
query_bucket_idx,
key_value_bucket_idx,
attention_mask,
query_key_dots.shape,
do_standard_self_attention,
)
if mask is not None:
query_key_dots = torch.where(mask, query_key_dots, mask_value)
# free memory
del mask
# Self mask is ALWAYS applied.
# From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf):
# " While attention to the future is not allowed, typical implementations of the
# Transformer do allow a position to attend to itself.
# Such behavior is undesirable in a shared-QK formulation because the dot-product
# of a query vector with itself will almost always be greater than the dot product of a
# query vector with a vector at another position. We therefore modify the masking
# to forbid a token from attending to itself, except in situations
# where a token has no other valid attention targets (e.g. the first token in a sequence) "
self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to(
query_bucket_idx.device
)
# apply self_mask
query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value)
# free memory
del self_mask
logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
# dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]`
attention_probs = torch.exp(query_key_dots - logits)
# free memory
del query_key_dots
# dropout
attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
# attend values
out_vectors = torch.matmul(attention_probs, value_vectors)
# free memory
del value_vectors
# merge chunk length
if out_vectors.ndim > 4:
logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1)
out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
return out_vectors, logits, attention_probs
def _compute_attn_mask(
self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention
):
# attention mask for LSH
if attention_mask is not None:
# if chunked attention, the attention mask has to correspond to LSH order
attention_mask = attention_mask.to(torch.uint8)[:, None, :]
if not do_standard_self_attention:
# expand attn_mask to fit with key_value_bucket_idx shape
attention_mask = attention_mask[:, None, :]
attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,))
# extract attention mask from LSH sorted key_indices
attention_mask = torch.gather(attention_mask, -1, key_indices)
attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape)
# Causal mask
if self.is_decoder is True:
causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)
# add attention mask if not None
if attention_mask is not None:
attention_mask = causal_mask * attention_mask
else:
attention_mask = causal_mask
return attention_mask
def _get_relevant_hid_states_and_buckets(
self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets
):
# concat hidden states
hidden_states = torch.cat([past_states, hidden_states], dim=1)
# batch_size hidden
batch_size = hidden_states.shape[0]
sequence_length = hidden_states.shape[1]
# check if cached buckets include pad bucket
max_bucket = self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets)
# if pad bucket was cached => need to increase num buckets for caching
increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1
# retrieve query buckets
query_buckets = self._hash_vectors(
query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets
)
# concat buckets
concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1)
# hash-based sort
bucket_idx = _stable_argsort(concat_buckets, dim=-1)
# bucket_idx has shape: BatchSize x NumAttnHeads x NumHashes x SequenceLength
assert bucket_idx.shape == (
batch_size,
self.num_attention_heads,
num_hashes,
sequence_length,
), f"bucket_idx should have shape {(batch_size, self.num_attention_heads, num_hashes, sequence_length)}, but has shape {bucket_idx.shape}."
# find indices of new bucket indices
relevant_bucket_idx = (bucket_idx == (bucket_idx.shape[-1] - 1)).nonzero()
# expand relevant bucket indices to its chunks
relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(relevant_bucket_idx, sequence_length)
relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))]
# adapt bucket_idx for batch and hidden states for index select
bucket_idx_batch_offset = sequence_length * (
batch_size
* torch.arange(relevant_bucket_idx_chunk.shape[-1], device=hidden_states.device, dtype=torch.long)
// relevant_bucket_idx_chunk.shape[-1]
)
# add batch offset
relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset
hidden_states = hidden_states.reshape((-1, self.hidden_size))
# select all relevant hidden states
relevant_hidden_states = hidden_states.index_select(0, relevant_bucket_idx_chunk_all_batch)
# reshape hidden states and bucket_idx to correct output
relevant_hidden_states = relevant_hidden_states.reshape(
batch_size, self.num_attention_heads, -1, self.hidden_size
)
relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape(
batch_size, self.num_attention_heads, num_hashes, -1
)
assert (
relevant_hidden_states.shape[2]
== (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes
), f"There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes} `hidden_states`, there are {relevant_hidden_states.shape[2]} `hidden_states`."
assert (
relevant_bucket_idx_chunk.shape[-1]
== (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length
), f"There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length} `hidden_states`, there are {relevant_bucket_idx_chunk.shape[-1]} `bucket_idx`."
return relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets
def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length):
# get relevant indices of where chunk starts and its size
start_indices_chunk = ((indices[:, -1] // self.chunk_length) - self.num_chunks_before) * self.chunk_length
total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after)
# expand start indices and add correct chunk offset via arange
expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(indices.shape[0], total_chunk_size)
chunk_sequence_indices = expanded_start_indices + torch.arange(
total_chunk_size, device=indices.device, dtype=torch.long
).unsqueeze(0).expand(indices.shape[0], total_chunk_size)
# make sure that circular logic holds via % seq len
chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length
# expand indices and set indices correctly
indices = indices.unsqueeze(1).expand((indices.shape[0], total_chunk_size, -1)).flatten(0, 1).clone()
indices[:, -1] = chunk_sequence_indices
return indices
def _len_and_dim_norm(self, vectors):
"""
length and attention head size dim normalization
"""
vectors = self._len_norm(vectors)
vectors = vectors * torch.rsqrt(
torch.tensor(self.attention_head_size, device=vectors.device, dtype=vectors.dtype)
)
return vectors
def _len_norm(self, x, epsilon=1e-6):
"""
length normalization
"""
variance = torch.mean(x ** 2, -1, keepdim=True)
norm_x = x * torch.rsqrt(variance + epsilon)
return norm_x
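    # --- Editorial sketch (illustrative addition, not part of the original class) ---
    # `_len_norm` divides each vector by the root mean square of its entries (an RMS
    # normalization), and `_len_and_dim_norm` additionally scales by
    # 1 / sqrt(attention_head_size), i.e. the usual attention scaling is folded into the keys.
    @staticmethod
    def _example_len_norm():
        x = torch.tensor([[3.0, 4.0]])                         # rms = sqrt((9 + 16) / 2)
        rms = torch.sqrt(torch.mean(x ** 2, -1, keepdim=True) + 1e-6)
        return x / rms                                         # approximately [[0.8485, 1.1314]]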
def _gather_by_expansion(self, vectors, idxs, num_hashes):
"""
expand dims of idxs and vectors for all hashes and gather
"""
expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size)
vectors = vectors.repeat(1, 1, num_hashes, 1)
return torch.gather(vectors, 2, expanded_idxs)
class ReverseSort(Function):
"""
    After chunked attention is applied, which sorts the clusters, the original ordering has to be restored. Since a
    customized backward function is used for Reformer, the gradients of the output vectors have to be explicitly sorted back here.
"""
@staticmethod
def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx):
# save sorted_bucket_idx for backprop
with torch.no_grad():
ctx.sorted_bucket_idx = sorted_bucket_idx
# undo sort to have correct order for next layer
expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape)
out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices)
logits = torch.gather(logits, 2, undo_sorted_bucket_idx)
return out_vectors, logits
@staticmethod
def backward(ctx, grad_out_vectors, grad_logits):
# get parameters saved in ctx
sorted_bucket_idx = ctx.sorted_bucket_idx
expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape)
# reverse sort of forward
grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices)
grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx)
# return grad and `None` fillers for last 2 forward args
return grad_out_vectors, grad_logits, None, None
class LocalSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.chunk_length = config.local_attn_chunk_length
self.num_chunks_before = config.local_num_chunks_before
self.num_chunks_after = config.local_num_chunks_after
self.is_decoder = config.is_decoder
self.pad_token_id = config.pad_token_id
self.attention_head_size = config.attention_head_size
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.hidden_size = config.hidden_size
# projection matrices
self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.dropout = config.local_attention_probs_dropout_prob
# save mask value here
self.register_buffer("mask_value_float16", torch.tensor(-1e4))
self.register_buffer("mask_value_float32", torch.tensor(-1e9))
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
past_buckets_states=None,
use_cache=False,
output_attentions=False,
**kwargs
):
sequence_length = hidden_states.shape[1]
batch_size = hidden_states.shape[0]
# check if cache shall be used and that hidden states are already cached
if use_cache and past_buckets_states[1] is not None:
assert (
past_buckets_states[0] is None
), "LocalSelfAttention should not make use of `buckets`. There seems to be an error when caching hidden_states_and_buckets."
key_value_hidden_states = self._retrieve_relevant_hidden_states(
past_buckets_states[1], self.chunk_length, self.num_chunks_before
)
key_value_hidden_states = torch.cat([key_value_hidden_states, hidden_states], dim=1)
# only query vector for last token
query_vectors = self.query(hidden_states)
# compute key and value for relevant chunk
key_vectors = self.key(key_value_hidden_states)
value_vectors = self.value(key_value_hidden_states)
# free memory
del key_value_hidden_states
else:
# project hidden_states to query, key and value
query_vectors = self.query(hidden_states)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
# split last dim into `config.num_attention_heads` and `config.attention_head_size`
query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size)
key_vectors = self._split_hidden_size_dim(key_vectors, self.num_attention_heads, self.attention_head_size)
value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size)
assert (
query_vectors.shape[-1] == self.attention_head_size
), "last dim of query_key_vectors is {} but should be {}.".format(
query_vectors.shape[-1], self.attention_head_size
)
assert (
key_vectors.shape[-1] == self.attention_head_size
), "last dim of query_key_vectors is {} but should be {}.".format(
key_vectors.shape[-1], self.attention_head_size
)
assert (
value_vectors.shape[-1] == self.attention_head_size
), "last dim of query_key_vectors is {} but should be {}.".format(
value_vectors.shape[-1], self.attention_head_size
)
if self.chunk_length is None:
assert (
self.num_chunks_before == 0 and self.num_chunks_after == 0
), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0."
# normalize key vectors
key_vectors = key_vectors / torch.sqrt(
torch.tensor(self.attention_head_size, device=key_vectors.device, dtype=key_vectors.dtype)
)
# get sequence length indices
indices = torch.arange(sequence_length, device=query_vectors.device).repeat(
batch_size, self.num_attention_heads, 1
)
# if one should do normal n^2 self-attention
do_standard_self_attention = sequence_length <= self.chunk_length
# if input should be chunked
if not do_standard_self_attention:
# chunk vectors
# B x Num_Attn_Head x Seq_Len // chunk_len x chunk_len x attn_head_size
query_vectors = self._split_seq_length_dim_to(
query_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
key_vectors = self._split_seq_length_dim_to(
key_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
# chunk indices
query_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)
key_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)
# append chunks before and after
key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
key_indices = self._look_adjacent(key_indices, self.num_chunks_before, self.num_chunks_after)
else:
query_indices = key_indices = indices
# query-key matmul: QK^T
query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
# free memory
del query_vectors, key_vectors
mask = self._compute_attn_mask(
query_indices, key_indices, attention_mask, query_key_dots.shape, do_standard_self_attention
)
if mask is not None:
# get mask tensor depending on half precision or not
if query_key_dots.dtype == torch.float16:
mask_value = self.mask_value_float16.half()
else:
mask_value = self.mask_value_float32
query_key_dots = torch.where(mask, query_key_dots, mask_value)
# free memory
del mask
# softmax
logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
attention_probs = torch.exp(query_key_dots - logits)
# free memory
del logits
# dropout
attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
# attend values
out_vectors = torch.matmul(attention_probs, value_vectors)
# free memory
del value_vectors
# merge chunk length
if not do_standard_self_attention:
out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
assert out_vectors.shape == (
batch_size,
self.num_attention_heads,
sequence_length,
self.attention_head_size,
)
out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
if output_attentions is False:
attention_probs = ()
return LocalSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs)
def _compute_attn_mask(
self, query_indices, key_indices, attention_mask, query_key_dots_shape, do_standard_self_attention
):
# chunk attention mask and look before and after
if attention_mask is not None:
attention_mask = attention_mask.to(torch.uint8)[:, None, :]
if not do_standard_self_attention:
attention_mask = self._split_seq_length_dim_to(attention_mask, -1, self.chunk_length, 1)
attention_mask = self._look_adjacent(attention_mask, self.num_chunks_before, self.num_chunks_after)
# create attn_mask
attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dots_shape)
# Causal mask
if self.is_decoder is True:
causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)
# add attention mask if not None
if attention_mask is not None:
attention_mask = causal_mask * attention_mask
else:
attention_mask = causal_mask
return attention_mask
@staticmethod
def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before):
start_position = ((previous_hidden_states.shape[1] // chunk_length) - num_chunks_before) * chunk_length
return previous_hidden_states[:, start_position:]
class ReformerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
all_head_size = config.num_attention_heads * config.attention_head_size
self.dropout = config.hidden_dropout_prob
self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
class ReformerAttention(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.layer_id = layer_id
self.attn_layers = config.attn_layers
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "lsh":
self.self_attention = LSHSelfAttention(config)
elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "local":
self.self_attention = LocalSelfAttention(config)
elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == set(["lsh", "local"]):
# get correct attn layers
if self.attn_layers[self.layer_id] == "lsh":
self.self_attention = LSHSelfAttention(config)
else:
self.self_attention = LocalSelfAttention(config)
else:
raise NotImplementedError(
"Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {}. Select attn layer types from ['lsh', 'local'] only.".format(
self.attn_layers
)
)
self.output = ReformerSelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
past_buckets_states=None,
use_cache=False,
orig_sequence_length=None,
output_attentions=False,
buckets=None,
):
hidden_states = self.layer_norm(hidden_states)
        # make sure cached hidden states are set to None for backward pass
if past_buckets_states is not None:
past_buckets_states_layer = past_buckets_states[self.layer_id]
else:
past_buckets_states_layer = None
        # use cached buckets for backprop if buckets are not None for LSHSelfAttention
self_attention_outputs = self.self_attention(
hidden_states=hidden_states,
head_mask=head_mask,
attention_mask=attention_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states_layer,
use_cache=use_cache,
output_attentions=output_attentions,
buckets=buckets,
)
# add buckets if necessary
if hasattr(self_attention_outputs, "buckets"):
buckets = self_attention_outputs.buckets
else:
buckets = None
# cache hidden states for future use
if use_cache:
if past_buckets_states[self.layer_id][0] is None:
# padded input should not be cached
past_buckets = (
buckets[:, :, :, :orig_sequence_length]
if (buckets is not None and orig_sequence_length > 1)
else buckets
)
else:
past_buckets = torch.cat([past_buckets_states[self.layer_id][0], buckets], dim=-1)
if past_buckets_states[self.layer_id][1] is None:
# padded input should not be cached
past_states = hidden_states[:, :orig_sequence_length]
else:
past_states = torch.cat([past_buckets_states[self.layer_id][1], hidden_states], dim=1)
past_buckets_states[self.layer_id] = (past_buckets, past_states)
# compute attention feed forward output
attention_output = self.output(self_attention_outputs.hidden_states)
return AttentionOutput(
hidden_states=attention_output,
attention_probs=self_attention_outputs.attention_probs,
buckets=buckets,
)
class ReformerFeedForwardDense(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
if isinstance(config.hidden_act, str):
self.act_fn = ACT2FN[config.hidden_act]
else:
self.act_fn = config.hidden_act
self.dense = nn.Linear(config.hidden_size, config.feed_forward_size)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = self.act_fn(hidden_states)
return hidden_states
class ReformerFeedForwardOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.dense = nn.Linear(config.feed_forward_size, config.hidden_size)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
class ChunkReformerFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense = ReformerFeedForwardDense(config)
self.output = ReformerFeedForwardOutput(config)
def forward(self, attention_output):
return apply_chunking_to_forward(
self.forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
def forward_chunk(self, hidden_states):
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dense(hidden_states)
return self.output(hidden_states)
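# --- Editorial sketch (illustrative addition, not part of the original file) ---
# Chunking the feed forward over the sequence: because the dense layers act on each position
# independently, splitting the sequence into chunks, applying the layer per chunk and
# concatenating gives the same result with a smaller peak memory footprint. A hand-rolled
# version of that idea (not the library helper `apply_chunking_to_forward`):
def _example_chunked_feed_forward(chunk_size=4):
    layer = nn.Linear(8, 8)
    hidden_states = torch.randn(2, 16, 8)                      # (batch, seq_len, hidden)
    chunked = torch.cat([layer(chunk) for chunk in hidden_states.split(chunk_size, dim=1)], dim=1)
    assert torch.allclose(chunked, layer(hidden_states), atol=1e-6)
    return chunked.shape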
class ReformerLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.attention = ReformerAttention(config, layer_id)
        # dropout requires the same seed
        # for the forward and backward pass
self.attention_seed = None
self.feed_forward_seed = None
self.feed_forward = ChunkReformerFeedForward(config)
def _init_attention_seed(self):
"""
This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1
normal forward call and 1 forward call in backward to recalculate activations.
"""
# randomize seeds
# use cuda generator if available
if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0:
# GPU
device_idx = torch.cuda.current_device()
self.attention_seed = torch.cuda.default_generators[device_idx].seed()
else:
# CPU
self.attention_seed = int(torch.seed() % sys.maxsize)
torch.manual_seed(self.attention_seed)
def _init_feed_forward_seed(self):
"""
This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls:
1 normal forward call and 1 forward call in backward to recalculate activations.
"""
# randomize seeds
# use cuda generator if available
if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0:
# GPU
device_idx = torch.cuda.current_device()
self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed()
else:
# CPU
self.feed_forward_seed = int(torch.seed() % sys.maxsize)
torch.manual_seed(self.feed_forward_seed)
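    # --- Editorial sketch (illustrative addition, not part of the original class) ---
    # Why the seeds above are stored: re-seeding the RNG right before dropout reproduces the
    # same dropout mask during the in-backward recomputation, so the recomputed activations
    # match the ones from the original forward pass (CPU generator shown here).
    @staticmethod
    def _example_seeded_dropout(seed=1234):
        x = torch.randn(4, 8)
        torch.manual_seed(seed)
        first = nn.functional.dropout(x, p=0.5, training=True)
        torch.manual_seed(seed)
        second = nn.functional.dropout(x, p=0.5, training=True)
        assert torch.equal(first, second)
        return first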
def forward(
self,
prev_attn_output,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
past_buckets_states=None,
use_cache=False,
orig_sequence_length=None,
output_attentions=False,
):
with torch.no_grad():
# every forward pass we sample a different seed
# for dropout and save for forward fn in backward pass
# to have correct dropout
if self.training:
self._init_attention_seed()
attn_outputs = self.attention(
hidden_states=hidden_states,
head_mask=head_mask,
attention_mask=attention_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states,
use_cache=use_cache,
orig_sequence_length=orig_sequence_length,
output_attentions=output_attentions,
)
attn_output = attn_outputs.hidden_states
# Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)
# Y_1 = X_1 + f(X_2)
attn_output = prev_attn_output + attn_output
# free memory
del prev_attn_output
# every forward pass we sample a different seed
# for dropout and save seed for forward fn in backward
# to have correct dropout
if self.training:
self._init_feed_forward_seed()
# Y_2 = X_2 + g(Y_1)
hidden_states = hidden_states + self.feed_forward(attn_output)
return ReformerOutput(
attn_output=attn_output,
hidden_states=hidden_states,
attention_probs=attn_outputs.attention_probs,
buckets=attn_outputs.buckets,
)
def backward_pass(
self,
next_attn_output,
hidden_states,
grad_attn_output,
grad_hidden_states,
attention_mask=None,
head_mask=None,
buckets=None,
):
        # Implements the backward pass for reversible ResNets.
        # A good explanation of how this works can be found in Fig. 6 of
        # https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0
        # This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
with torch.enable_grad():
next_attn_output.requires_grad = True
# set seed to have correct dropout
torch.manual_seed(self.feed_forward_seed)
# g(Y_1)
res_hidden_states = self.feed_forward(next_attn_output)
res_hidden_states.backward(grad_hidden_states, retain_graph=True)
with torch.no_grad():
# X_2 = Y_2 - g(Y_1)
hidden_states = hidden_states - res_hidden_states
del res_hidden_states
grad_attn_output = grad_attn_output + next_attn_output.grad
next_attn_output.grad = None
with torch.enable_grad():
hidden_states.requires_grad = True
# set seed to have correct dropout
torch.manual_seed(self.attention_seed)
# f(X_2)
            # use cached buckets for backprop if buckets are not None for LSHSelfAttention
output = self.attention(
hidden_states=hidden_states,
head_mask=head_mask,
attention_mask=attention_mask,
buckets=buckets,
).hidden_states
output.backward(grad_attn_output, retain_graph=True)
with torch.no_grad():
# X_1 = Y_1 - f(X_2)
attn_output = next_attn_output - output
del output, next_attn_output
grad_hidden_states = grad_hidden_states + hidden_states.grad
hidden_states.grad = None
hidden_states = hidden_states.detach()
return ReformerBackwardOutput(
attn_output=attn_output,
hidden_states=hidden_states,
grad_attn_output=grad_attn_output,
grad_hidden_states=grad_hidden_states,
)
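# --- Editorial sketch (illustrative addition, not part of the original file) ---
# The reversible residual identities used by `ReformerLayer`: the forward pass computes
# Y_1 = X_1 + f(X_2) and Y_2 = X_2 + g(Y_1); the backward pass recomputes the inputs as
# X_2 = Y_2 - g(Y_1) and X_1 = Y_1 - f(X_2) instead of storing activations, with
# f = attention and g = feed forward. A minimal round trip with plain linear layers:
def _example_revnet_roundtrip():
    f, g = nn.Linear(8, 8), nn.Linear(8, 8)
    x_1, x_2 = torch.randn(2, 8), torch.randn(2, 8)
    y_1 = x_1 + f(x_2)
    y_2 = x_2 + g(y_1)
    x_2_rec = y_2 - g(y_1)
    x_1_rec = y_1 - f(x_2_rec)
    assert torch.allclose(x_1_rec, x_1, atol=1e-6) and torch.allclose(x_2_rec, x_2, atol=1e-6)
    return True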
class _ReversibleFunction(Function):
"""
To prevent PyTorch from performing the usual backpropagation, a customized backward function is implemented here.
    This ensures that no memory-expensive activations are saved during the forward pass. This function is
heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
"""
@staticmethod
def forward(
ctx,
hidden_states,
layers,
attention_mask,
head_mask,
num_hashes,
all_hidden_states,
all_attentions,
past_buckets_states,
use_cache,
orig_sequence_length,
output_hidden_states,
output_attentions,
):
all_buckets = ()
# split duplicated tensor
hidden_states, attn_output = torch.chunk(hidden_states, 2, dim=-1)
for layer_id, (layer, layer_head_mask) in enumerate(zip(layers, head_mask)):
if output_hidden_states is True:
all_hidden_states.append(hidden_states)
layer_outputs = layer(
prev_attn_output=attn_output,
hidden_states=hidden_states,
attention_mask=attention_mask,
head_mask=layer_head_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states,
use_cache=use_cache,
orig_sequence_length=orig_sequence_length,
output_attentions=output_attentions,
)
attn_output = layer_outputs.attn_output
hidden_states = layer_outputs.hidden_states
all_buckets = all_buckets + (layer_outputs.buckets,)
if output_attentions:
all_attentions.append(layer_outputs.attention_probs)
# Add last layer
if output_hidden_states is True:
all_hidden_states.append(hidden_states)
# attach params to ctx for backward
ctx.save_for_backward(attn_output.detach(), hidden_states.detach())
ctx.layers = layers
ctx.all_buckets = all_buckets
ctx.head_mask = head_mask
ctx.attention_mask = attention_mask
# Concatenate 2 RevNet outputs
return torch.cat([attn_output, hidden_states], dim=-1)
@staticmethod
def backward(ctx, grad_hidden_states):
grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
# retrieve params from ctx for backward
attn_output, hidden_states = ctx.saved_tensors
# create tuple
output = ReformerBackwardOutput(
attn_output=attn_output,
hidden_states=hidden_states,
grad_attn_output=grad_attn_output,
grad_hidden_states=grad_hidden_states,
)
# free memory
del grad_attn_output, grad_hidden_states, attn_output, hidden_states
layers = ctx.layers
all_buckets = ctx.all_buckets
head_mask = ctx.head_mask
attention_mask = ctx.attention_mask
for idx, layer in enumerate(layers[::-1]):
# pop last buckets from stack
buckets = all_buckets[-1]
all_buckets = all_buckets[:-1]
# backprop
output = layer.backward_pass(
next_attn_output=output.attn_output,
hidden_states=output.hidden_states,
grad_attn_output=output.grad_attn_output,
grad_hidden_states=output.grad_hidden_states,
head_mask=head_mask[len(layers) - idx - 1],
attention_mask=attention_mask,
buckets=buckets,
)
assert all_buckets == (), "buckets have to be empty after backpropagation"
grad_hidden_states = torch.cat([output.grad_attn_output, output.grad_hidden_states], dim=-1)
# num of return vars has to match num of forward() args
# return gradient for hidden_states arg and None for other args
return grad_hidden_states, None, None, None, None, None, None, None, None, None, None, None
class ReformerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)])
        # Reformer uses RevNets, so the outputs of the last layer are concatenated and
        # LayerNorm is applied over 2 * hidden_size
self.layer_norm = nn.LayerNorm(2 * config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
past_buckets_states=None,
use_cache=False,
orig_sequence_length=None,
output_hidden_states=False,
output_attentions=False,
):
# hidden_states and attention lists to be filled if wished
all_hidden_states = []
all_attentions = []
# init cached hidden states if necessary
if past_buckets_states is None:
past_buckets_states = [((None), (None)) for i in range(len(self.layers))]
# concat same tensor for reversible ResNet
hidden_states = torch.cat([hidden_states, hidden_states], dim=-1)
hidden_states = _ReversibleFunction.apply(
hidden_states,
self.layers,
attention_mask,
head_mask,
num_hashes,
all_hidden_states,
all_attentions,
past_buckets_states,
use_cache,
orig_sequence_length,
output_hidden_states,
output_attentions,
)
# Apply layer norm to concatenated hidden states
hidden_states = self.layer_norm(hidden_states)
# Apply dropout
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return ReformerEncoderOutput(
hidden_states=hidden_states,
all_hidden_states=all_hidden_states,
all_attentions=all_attentions,
past_buckets_states=past_buckets_states,
)
class ReformerOnlyLMHead(nn.Module):
def __init__(self, config):
super().__init__()
        # Reformer uses RevNets, so the outputs of the last layer are concatenated and
        # LayerNorm is applied over 2 * hidden_size
self.seq_len_dim = 1
self.chunk_size_lm_head = config.chunk_size_lm_head
self.decoder = nn.Linear(2 * config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
class ReformerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ReformerConfig
base_model_prefix = "reformer"
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, AxialPositionEmbeddings):
for weight in module.weights:
torch.nn.init.normal_(weight, std=self.config.axial_norm_std)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@dataclass
class ReformerModelOutput(ModelOutput):
"""
Output type of :class:`~transformers.ReformerModel`.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, 2 * hidden_size)`):
Sequence of hidden-states at the last layer of the model. The last dimension is ``2 * hidden_size``
because Reformer concatenates the two streams of its reversible residual layers.
past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor)` of length :obj:`config.n_layers`, with the first
element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes, sequence_length)`
and the second being the previous `hidden_states` of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Contains precomputed buckets and hidden-states that can be used (see ``past_buckets_states`` input) to
speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor
past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class ReformerModelWithLMHeadOutput(ModelOutput):
"""
Output type of :class:`~transformers.ReformerModelWithLMHead`.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor)` of length :obj:`config.n_layers`, with the first
element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes, sequence_length)`
and the second being the previous `hidden_states` of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Contains precomputed buckets and hidden-states that can be used (see ``past_buckets_states`` input) to
speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
REFORMER_START_DOCSTRING = r"""
Reformer was proposed in `Reformer: The Efficient Transformer <https://arxiv.org/abs/2001.04451>`__ by Nikita
Kitaev, Łukasz Kaiser, Anselm Levskaya.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to
general usage and behavior.
Parameters:
config (:class:`~transformers.ReformerConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
REFORMER_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training, the ``input_ids`` sequence length has to
be a multiple of the relevant model's chunk lengths (LSH's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using :class:`~transformers.ReformerTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
num_hashes (:obj:`int`, `optional`):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in :obj:`config.num_hashes`.
For more information, see :obj:`num_hashes` in :class:`~transformers.ReformerConfig`.
past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`):
List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor)` of length :obj:`config.n_layers`, with the first
element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes, sequence_length)`
and the second being the previous `hidden_states` of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed
up sequential decoding.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Reformer Model transformer outputting raw hidden-states" "without any specific head on top.",
REFORMER_START_DOCSTRING,
)
class ReformerModel(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
assert (
self.config.num_hidden_layers > 0
), "`config.attn_layers` is empty. Select at least one attn layer form ['lsh', 'local']"
self.embeddings = ReformerEmbeddings(config)
self.encoder = ReformerEncoder(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/reformer-crime-and-punishment",
output_type=ReformerModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
num_hashes=None,
past_buckets_states=None,
use_cache=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
):
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size() # noqa: F841
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1] # noqa: F841
device = inputs_embeds.device
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
assert (
len(input_shape) == 2
), "`input_ids` have be of shape `[batch_size, sequence_length]`, but got shape: {}".format(input_shape)
if past_buckets_states is not None:
assert not self.training, "`past_buckets_states` can only be used for inference, not for training."
# prepare head mask
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, is_attention_chunked=True)
# original sequence length for padding
orig_sequence_length = input_shape[-1]
# if needs padding
least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)
min_chunk_length = _get_min_chunk_len(self.config)
must_pad_to_match_chunk_length = (
input_shape[-1] % least_common_mult_chunk_length != 0
and input_shape[-1] > min_chunk_length
and past_buckets_states is None
)
if must_pad_to_match_chunk_length:
padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length
if self.training:
raise ValueError(
"If training, sequence Length {} has to be a multiple of least common multiple chunk_length {}. Please consider padding the input to a length of {}.".format(
input_shape[-1], least_common_mult_chunk_length, input_shape[-1] + padding_length
)
)
# pad input
input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length(
input_ids,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
position_ids=position_ids,
input_shape=input_shape,
padding_length=padding_length,
padded_seq_length=least_common_mult_chunk_length,
device=device,
)
# start index for position encoding depends on incremental decoding
if past_buckets_states is not None:
start_idx_pos_encodings = past_buckets_states[0][1].shape[1]
else:
start_idx_pos_encodings = 0
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
start_idx_pos_encodings=start_idx_pos_encodings,
)
encoder_outputs = self.encoder(
hidden_states=embedding_output,
head_mask=head_mask,
attention_mask=attention_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states,
use_cache=use_cache,
orig_sequence_length=orig_sequence_length,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
)
sequence_output = encoder_outputs.hidden_states
# if padding was applied
if must_pad_to_match_chunk_length:
sequence_output = sequence_output[:, :orig_sequence_length]
past_buckets_states = encoder_outputs.past_buckets_states if use_cache else None
hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None
attentions = encoder_outputs.all_attentions if output_attentions else None
if not return_dict:
return tuple(v for v in [sequence_output, past_buckets_states, hidden_states, attentions] if v is not None)
return ReformerModelOutput(
last_hidden_state=sequence_output,
past_buckets_states=past_buckets_states,
hidden_states=hidden_states,
attentions=attentions,
)
def _pad_to_mult_of_chunk_length(
self,
input_ids,
inputs_embeds=None,
attention_mask=None,
position_ids=None,
input_shape=None,
padding_length=None,
padded_seq_length=None,
device=None,
):
logger.info(
"Input ids are automatically padded from {} to {} to be a multiple of `config.chunk_length`: {}".format(
input_shape[-1], input_shape[-1] + padding_length, padded_seq_length
)
)
padded_input_ids = torch.full(
(input_shape[0], padding_length),
self.config.pad_token_id,
device=device,
dtype=torch.long,
)
# Extend `attention_mask`
if attention_mask is not None:
pad_attention_mask = torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype)
attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1)
else:
attention_mask = torch.cat(
[
torch.ones(input_shape, device=device, dtype=torch.uint8),
torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.uint8),
],
dim=-1,
)
# Extend `input_ids` with padding to match least common multiple chunk_length
if input_ids is not None:
input_ids = torch.cat([input_ids, padded_input_ids], dim=-1)
input_shape = input_ids.size()
# Pad position ids if given
if position_ids is not None:
# continue the position ids past the last original position for the padded tokens
padded_position_ids = torch.arange(
position_ids.shape[-1], position_ids.shape[-1] + padding_length, dtype=torch.long, device=device
)
padded_position_ids = padded_position_ids.unsqueeze(0).expand(input_shape[0], padding_length)
position_ids = torch.cat([position_ids, padded_position_ids], dim=-1)
# Extend `inputs_embeds` with padding to match least common multiple chunk_length
if inputs_embeds is not None:
padded_inputs_embeds = self.embeddings(padded_input_ids, position_ids)
inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2)
input_shape = inputs_embeds.size()
return input_ids, inputs_embeds, attention_mask, position_ids, input_shape
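# Worked example of the padding arithmetic that feeds this method (numbers are illustrative):
# with least_common_mult_chunk_length = 64 and an input of length 100, the caller computes
# padding_length = 64 - 100 % 64 = 28, so the sequence is padded to 128 and the padded positions
# are masked out through the zero-extended attention_mask built above.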
@add_start_docstrings("""Reformer Model with a `language modeling` head on top. """, REFORMER_START_DOCSTRING)
class ReformerModelWithLMHead(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
assert config.is_decoder, "If you want to use `ReformerModelWithLMHead` make sure that `is_decoder=True`."
assert (
"local" not in self.config.attn_layers or config.local_num_chunks_after == 0
), f"If causal mask is enabled, make sure that `config.local_num_chunks_after` is set to 0 and not {config.local_num_chunks_after}."
assert (
"lsh" not in self.config.attn_layers or config.lsh_num_chunks_after == 0
), f"If causal mask is enabled, make sure that `config.lsh_num_chunks_after` is set to 1 and not {config.lsh_num_chunks_after}."
self.reformer = ReformerModel(config)
self.lm_head = ReformerOnlyLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
@add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/reformer-crime-and-punishment",
output_type=CausalLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
position_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
num_hashes=None,
past_buckets_states=None,
use_cache=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the language modeling loss. Indices should be in :obj:`[-100, 0,
..., config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size - 1]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
reformer_outputs = self.reformer(
input_ids,
position_ids=position_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states,
use_cache=use_cache,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
sequence_output = reformer_outputs[0]
logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1))
if not return_dict:
output = (logits,) + reformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return ReformerModelWithLMHeadOutput(
loss=loss,
logits=logits,
past_buckets_states=reformer_outputs.past_buckets_states,
hidden_states=reformer_outputs.hidden_states,
attentions=reformer_outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, use_cache=None, num_hashes=None, **kwargs):
# only last token for inputs_ids if past is defined in kwargs
if past is not None:
input_ids = input_ids[:, -1:]
inputs_dict = {
"input_ids": input_ids,
"past_buckets_states": past,
"use_cache": use_cache,
"num_hashes": num_hashes,
}
return inputs_dict
def _reorder_cache(self, past, beam_idx):
reord_past_buckets_states = []
for layer_past in past:
# buckets
if layer_past[0] is not None:
reord_buckets = layer_past[0].index_select(0, beam_idx)
else:
reord_buckets = None
# hidden states
reord_hidden_states = layer_past[1].index_select(0, beam_idx)
reord_past_buckets_states.append((reord_buckets, reord_hidden_states))
return reord_past_buckets_states
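# Generation usage sketch (illustrative; the prompt text and generation settings are assumptions):
#
#   from transformers import ReformerModelWithLMHead, ReformerTokenizer
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   model = ReformerModelWithLMHead.from_pretrained("google/reformer-crime-and-punishment")
#   input_ids = tokenizer("A few days later", return_tensors="pt").input_ids
#   # with use_cache=True, generate() feeds `past_buckets_states` back through
#   # prepare_inputs_for_generation() above, so only the last token is re-encoded at each step
#   generated = model.generate(input_ids, do_sample=True, max_length=100, use_cache=True)
#   print(tokenizer.decode(generated[0]))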
@add_start_docstrings("""Reformer Model with a `language modeling` head on top. """, REFORMER_START_DOCSTRING)
class ReformerForMaskedLM(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
assert (
not config.is_decoder
), "If you want to use `ReformerForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention."
self.reformer = ReformerModel(config)
self.lm_head = ReformerOnlyLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
@add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/reformer-crime-and-punishment",
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
position_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
num_hashes=None,
labels=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size - 1]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
(masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size - 1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
reformer_outputs = self.reformer(
input_ids,
position_ids=position_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
num_hashes=num_hashes,
use_cache=False, # no causal mask
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
sequence_output = reformer_outputs[0]
logits = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + reformer_outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=logits,
hidden_states=reformer_outputs.hidden_states,
attentions=reformer_outputs.attentions,
)
@add_start_docstrings(
"""
Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
REFORMER_START_DOCSTRING,
)
class ReformerForSequenceClassification(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.reformer = ReformerModel(config)
self.classifier = ReformerClassificationHead(config)
if config.is_decoder:
logger.warning("You might want to disable causal masking for sequence classification")
self.init_weights()
@add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/reformer-crime-and-punishment",
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
position_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
num_hashes=None,
labels=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.reformer(
input_ids,
position_ids=position_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
num_hashes=num_hashes,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class ReformerClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, hidden_states, **kwargs):
hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
@add_start_docstrings(
"""
Reformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA
(a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
REFORMER_START_DOCSTRING,
)
class ReformerForQuestionAnswering(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.reformer = ReformerModel(config)
# 2 * config.hidden_size because we use reversible residual layers
self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/reformer-crime-and-punishment",
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
position_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
num_hashes=None,
start_positions=None,
end_positions=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
reformer_outputs = self.reformer(
input_ids,
position_ids=position_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
num_hashes=num_hashes,
use_cache=False, # no causal mask
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
sequence_output = reformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split adds an extra dimension that we squeeze here
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + reformer_outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=reformer_outputs.hidden_states,
attentions=reformer_outputs.attentions,
)
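# Sketch of turning the start/end logits into an answer string (illustrative; assumes a `tokenizer`
# and a single-example batch, neither of which is defined in this module):
#
#   outputs = model(**inputs)
#   start_idx = int(outputs.start_logits.argmax(dim=-1))
#   end_idx = int(outputs.end_logits.argmax(dim=-1))
#   answer = tokenizer.decode(inputs["input_ids"][0, start_idx : end_idx + 1])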
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.ParameterList",
"torch.einsum",
"torch.enable_grad",
"torch.cuda.current_device",
"torch.ones",
"torch.seed",
"torch.exp",
"torch.nn.CrossEntropyLoss",
"torch.chunk",
"torch.where",
"torch.reshape",
"torch.rsqrt",
"torch.sum",
"torch.nn.LayerNorm",
"torch.gather",
"torch.argsort",
"torch.manual_seed",
"torch.nn.init.normal_",
"torch.tensor",
"torch.index_select",
"torch.nn.Embedding",
"torch.zeros",
"torch.nn.functional.dropout",
"torch.full",
"torch.nn.functional.dropout2d",
"torch.mean",
"torch.matmul",
"torch.argmax",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.no_grad",
"torch.logsumexp",
"torch.ones_like",
"torch.tanh",
"torch.randn"
] | 1.0 | ceostroff/transformers | 3095ee9dab739f212a8753b5be4e1a72ba42e28e |
1.0 | # coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLConfig, CTRLLMHeadModel, CTRLModel
class CTRLModelTester:
def __init__(
self,
parent,
):
self.parent = parent
self.batch_size = 14
self.seq_length = 7
self.is_training = True
self.use_token_type_ids = True
self.use_input_mask = True
self.use_labels = True
self.use_mc_token_ids = True
self.vocab_size = 99
self.hidden_size = 32
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = CTRLConfig(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLModel(config=config)
model.to(torch_device)
model.eval()
model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = CTRLLMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (CTRLModel, CTRLLMHeadModel) if is_torch_available() else ()
all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
test_pruning = True
test_torchscript = False
test_resize_embeddings = False
test_head_masking = False
def setUp(self):
self.model_tester = CTRLModelTester(self)
self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_ctrl_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*config_and_inputs)
def test_ctrl_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = CTRLModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_ctrl(self):
model = CTRLLMHeadModel.from_pretrained("ctrl")
model.to(torch_device)
input_ids = torch.tensor(
[[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
) # Legal the president is
expected_output_ids = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| [
"torch.tensor"
] | 1.0 | ceostroff/transformers | 3095ee9dab739f212a8753b5be4e1a72ba42e28e |
1.0 | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : RelGAN_D.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import math
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import config as cfg
from utils.helpers import truncated_normal_
n_heads = 4
n_transformer_layers = 3
class OurGAN_D(nn.Module):
def __init__(self, embed_dim, max_seq_len, vocab_size, padding_idx, gpu=False, dropout=0.25):
super(OurGAN_D, self).__init__()
self.embed_dim = embed_dim
self.max_seq_len = max_seq_len
self.gpu = gpu
self.embeddings = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(vocab_size, embed_dim, bias=False),
nn.Tanh()
)
# Returns BxTxD
self.transformer = nn.TransformerEncoder(
nn.TransformerEncoderLayer(embed_dim, nhead=n_heads),
n_transformer_layers,
norm=nn.LayerNorm(self.embed_dim)
)
self.fc1 = nn.Sequential(
nn.Linear(self.embed_dim * self.max_seq_len, self.embed_dim),
nn.LeakyReLU(0.2)
)
self.fc2 = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(self.embed_dim, 100),
nn.LeakyReLU(0.2)
)
self.fc3 = nn.Sequential(
nn.Linear(100, 1),
nn.Sigmoid()
)
self.init_params()
self.pos_encoding = self.positional_encoding()
def positional_encoding(self):
# From Assignment 3
pos_indices = torch.arange(self.max_seq_len)[..., None]
dim_indices = torch.arange(self.embed_dim//2)[None, ...]
exponents = (2*dim_indices).float()/(self.embed_dim)
trig_args = pos_indices / (10000**exponents)
sin_terms = torch.sin(trig_args)
cos_terms = torch.cos(trig_args)
pos_encodings = torch.zeros((self.max_seq_len, self.embed_dim))
pos_encodings[:, 0::2] = sin_terms
pos_encodings[:, 1::2] = cos_terms
if self.gpu:
pos_encodings = pos_encodings.cuda()
return pos_encodings
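# The tensor built above follows the standard sinusoidal positional encoding:
#   PE[pos, 2i] = sin(pos / 10000^(2i / embed_dim))
#   PE[pos, 2i + 1] = cos(pos / 10000^(2i / embed_dim))
# even feature indices carry the sine terms and odd indices the cosine terms.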
def forward(self, inp):
"""
Get discriminator scores for a batch of sequences
:param inp: batch_size * seq_len * vocab_size
:return: batch_size * 1 tensor of sigmoid probabilities
"""
emb = self.embeddings(inp) # batch_size * max_seq_len * embed_dim
seqlen = inp.size(1)
emb = emb + self.pos_encoding[:seqlen]
trans = self.transformer(emb) # batch * max_seq_len * embed_dim
x = self.fc1(trans.flatten(start_dim=1))
x = self.fc2(x)
x = self.fc3(x)
return x
def init_params(self):
for param in self.parameters():
if param.requires_grad and len(param.shape) > 0:
stddev = 1 / math.sqrt(param.shape[0])
if cfg.dis_init == 'uniform':
torch.nn.init.uniform_(param, a=-0.05, b=0.05)
elif cfg.dis_init == 'normal':
torch.nn.init.normal_(param, std=stddev)
elif cfg.dis_init == 'truncated_normal':
truncated_normal_(param, std=stddev)
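# Usage sketch (hyperparameters and shapes below are assumptions for illustration, not values from
# this repository; `config.dis_init` must be set before the constructor runs init_params):
#
#   import torch
#   D = OurGAN_D(embed_dim=64, max_seq_len=20, vocab_size=5000, padding_idx=0)
#   # the discriminator consumes token distributions, e.g. one-hot real data or generator softmax output
#   one_hot = torch.nn.functional.one_hot(torch.randint(0, 5000, (8, 20)), num_classes=5000).float()
#   scores = D(one_hot)  # (8, 1) sigmoid outputs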
| [
"torch.zeros",
"torch.cos",
"torch.nn.Dropout",
"torch.nn.Linear",
"torch.nn.LayerNorm",
"torch.sin",
"torch.nn.Sigmoid",
"torch.arange",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.init.normal_",
"torch.nn.init.uniform_",
"torch.nn.TransformerEncoderLayer"
] | 1.0.0 | JustinLokHinWu/TextGAN-PyTorch | 427f08890056a96fde7e5b67c26c3bb9f5a420a4 |
0.4 | import torch.utils.data as data
import os
import os.path
import numpy as np
from numpy.random import randint
import torch
from colorama import init
from colorama import Fore, Back, Style
import random
from os import listdir
from os.path import join, splitext
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from PIL import Image, ImageFilter, ImageFile
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
init(autoreset=True)
class VideoRecord(object):
def __init__(self, row):
self._data = row
@property
def path(self):
return self._data[0]
@property
def num_frames(self):
return int(self._data[1])
@property
def label(self):
return int(self._data[2])
class TSNDataSet(data.Dataset):
def __init__(self, root_path, list_file, num_dataload,
num_segments=3, new_length=1, modality='RGB',
image_tmpl='img_{:05d}.t7', transform=None,
force_grayscale=False, random_shift=True, test_mode=False):
self.root_path = root_path
self.list_file = list_file
self.num_segments = num_segments
self.new_length = new_length
self.modality = modality
self.image_tmpl = image_tmpl
self.transform = transform
self.random_shift = random_shift
self.test_mode = test_mode
self.num_dataload = num_dataload
if self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
self.new_length += 1 # Diff needs one more image to calculate diff
self._parse_list() # read all the video files
def _load_feature(self, directory, idx):
if self.modality == 'RGB' or self.modality == 'RGBDiff' or self.modality == 'RGBDiff2' or self.modality == 'RGBDiffplus':
feat_path = os.path.join(directory, self.image_tmpl.format(idx))
try:
feat = [torch.load(feat_path)]
except Exception:
# report the offending feature file and re-raise instead of failing later with an undefined `feat`
print(Back.RED + feat_path)
raise
return feat
elif self.modality == 'Flow':
x_feat = torch.load(os.path.join(directory, self.image_tmpl.format('x', idx)))
y_feat = torch.load(os.path.join(directory, self.image_tmpl.format('y', idx)))
return [x_feat, y_feat]
def _parse_list(self):
self.video_list = [VideoRecord(x.strip().split(' ')) for x in open(self.list_file)]
# repeat the list if the length is less than num_dataload (especially for target data)
n_repeat = self.num_dataload//len(self.video_list)
n_left = self.num_dataload%len(self.video_list)
self.video_list = self.video_list*n_repeat + self.video_list[:n_left]
def _sample_indices(self, record):
"""
:param record: VideoRecord
:return: list
"""
#np.random.seed(1)
average_duration = (record.num_frames - self.new_length + 1) // self.num_segments
if average_duration > 0:
offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration, size=self.num_segments)
elif record.num_frames > self.num_segments:
offsets = np.sort(randint(record.num_frames - self.new_length + 1, size=self.num_segments))
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_val_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)])
else:
offsets = np.zeros((self.num_segments,))
return offsets + 1
def _get_test_indices(self, record):
num_min = self.num_segments + self.new_length - 1
num_select = record.num_frames - self.new_length + 1
if record.num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array([int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]) # pick the central frame in each segment
else: # the video clip is too short --> duplicate the last frame
id_select = np.array([x for x in range(num_select)])
# expand to the length of self.num_segments with the last element
id_expand = np.ones(self.num_segments-num_select,dtype=int)*id_select[id_select[0]-1]
offsets = np.append(id_select, id_expand)
return offsets + 1
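# Worked example of the sampling above (illustrative numbers): with num_segments=3, new_length=1 and
# a 30-frame video, tick = 30 / 3 = 10 and the central-frame offsets are [5, 15, 25], returned as
# 1-based frame indices [6, 16, 26].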
def __getitem__(self, index):
record = self.video_list[index]
if not self.test_mode:
segment_indices = self._sample_indices(record) if self.random_shift else self._get_val_indices(record)
else:
segment_indices = self._get_test_indices(record)
return self.get(record, segment_indices)
def get(self, record, indices):
frames = list()
for seg_ind in indices:
p = int(seg_ind)
for i in range(self.new_length):
seg_feats = self._load_feature(record.path, p)
frames.extend(seg_feats)
if p < record.num_frames:
p += 1
# process_data = self.transform(frames)
process_data = torch.stack(frames)
return process_data, record.label
def __len__(self):
return len(self.video_list)
class VideoDataset(data.Dataset):
def __init__(
self,
folder,
n_frames,
frame_size=224,
separator="_"
):
self.folder = folder
self.num_segments = n_frames
self.frame_size = frame_size
self.data_transform = transforms.Compose(
[
transforms.Resize(self.frame_size),
transforms.CenterCrop(self.frame_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
self.separator = separator
self.classes = [c for c in sorted(listdir(folder))]
self.videos_with_classes = []
for c_index, c in enumerate(self.classes):
c_path = join(self.folder, c)
videos = listdir(c_path)
for v in videos:
v_path = join(c_path, v)
num_frames = len(listdir(v_path))
if num_frames >= self.num_segments:
pair = (v_path, c_index)
self.videos_with_classes.append(pair)
def _get_test_indices(self, num_frames):
num_min = self.num_segments
num_select = num_frames
if num_frames >= num_min:
tick = float(num_select) / float(self.num_segments)
offsets = np.array(
[int(tick / 2.0 + tick * float(x)) for x in range(self.num_segments)]
) # pick the central frame in each segment
else: # the video clip is too short --> duplicate the last frame
id_select = np.array([x for x in range(num_select)])
# expand to the length of self.num_segments with the last element
id_expand = (
np.ones(self.num_segments - num_select, dtype=int)
* id_select[id_select[0] - 1]
)
offsets = np.append(id_select, id_expand)
return offsets
def __getitem__(self, index):
video, label = self.videos_with_classes[index]
frames_temp = sorted(
listdir(video),
key=lambda path: int(path.split(self.separator)[-1].split(".")[0]),
)
frames = [f for f in frames_temp if f.endswith('jpg') or f.endswith('jpeg')]
num_frames = len(frames)
data = []
segment_indices = self._get_test_indices(num_frames)
for index in segment_indices:
frame = frames[index]
frame_path = join(video, frame)
frame_img = Image.open(frame_path)
frame_feat = self.data_transform(frame_img)
data.append(frame_feat)
tensor = torch.stack(data)
return tensor, label
def __len__(self):
return len(self.videos_with_classes) | [
"torch.utils.data.append",
"torch.stack",
"torch.load"
] | 0.4.1 | gzaraunitn/TA3N | d83ae5d9c8f4452ff69dd9002bb4016a695a4be8 |
1.10 | import os
import torch
from transformers import TrainerControl, TrainerState
from transformers.file_utils import WEIGHTS_NAME
from transformers.modeling_utils import PreTrainedModel
from transformers.trainer_callback import TrainerCallback
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from transformers.training_args import TrainingArguments
from ruprompts.prompt import Prompt
from ruprompts.prompt_embedding import PROMPT_PROVIDER_KEY_NAME
try:
import omegaconf
IS_OMEGACONF_AVAILABLE = True
except ImportError:
omegaconf = None
IS_OMEGACONF_AVAILABLE = False
try:
import wandb
IS_WANDB_AVAILABLE = True
except ImportError:
wandb = None
IS_WANDB_AVAILABLE = False
class FreezeTransformerUnfreezePrompt(TrainerCallback):
"""Freezes all parameters but those of prompt provider."""
def on_train_begin(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
model: PreTrainedModel,
**kwargs,
):
for name, param in model.transformer.named_parameters():
if PROMPT_PROVIDER_KEY_NAME in name:
param.requires_grad = True
else:
param.requires_grad = False
class ReduceCheckpoint(TrainerCallback):
"""Reduces the checkpoint size by keeping only the weights of prompt provider."""
def on_save(
self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs
):
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}"
output_dir = os.path.join(args.output_dir, checkpoint_folder)
weights_path = os.path.join(output_dir, WEIGHTS_NAME)
weights = torch.load(weights_path)
keys_to_remove = []
for weight_key in weights:
if PROMPT_PROVIDER_KEY_NAME not in weight_key:
keys_to_remove.append(weight_key)
for key in keys_to_remove:
weights.pop(key)
torch.save(weights, weights_path)
class SavePretrainedPrompt(TrainerCallback):
"""Saves the prompt as pretrained on checkpoint.
Args:
prompt (ruprompts.prompt.Prompt): Prompt instance to be saved.
"""
def __init__(self, prompt: Prompt):
self.prompt = prompt
def on_save(
self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs
):
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}"
output_dir = os.path.join(args.output_dir, checkpoint_folder)
self.prompt.save_pretrained(output_dir)
class WBLogHydraConfig(TrainerCallback):
"""Logs Hydra config to Weights and Biases on training start.
Args:
cfg (omegaconf.DictConfig): Config to be logged.
"""
def __init__(self, cfg):
if not (IS_OMEGACONF_AVAILABLE and IS_WANDB_AVAILABLE):
raise UserWarning(
"WBLogHydraConfig is not available. Install `hydra` and `wandb` "
"with `pip install hydra-core wandb`."
)
self.cfg = cfg
def on_train_begin(
self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs
):
wandb.config.update({"hydra": omegaconf.OmegaConf.to_container(self.cfg)})
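# Sketch of wiring these callbacks into a transformers Trainer (all objects below, i.e. model,
# training_args, train_dataset and prompt, are assumed to exist elsewhere):
#
#   from transformers import Trainer
#   trainer = Trainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       callbacks=[
#           FreezeTransformerUnfreezePrompt(),
#           ReduceCheckpoint(),
#           SavePretrainedPrompt(prompt),
#       ],
#   )
#   trainer.train()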
| [
"torch.save",
"torch.load"
] | 1.10.0 | sberbank-ai/ru-prompts | 4eeedae92cb5234c70adc787ace7cfceb76b0be0 |
1.2 | import torch
import torch.nn as nn
import torch.nn.functional as F
from base import BaseModel
from model.mem_transformer import MemTransformerLM
from model.gru_ae import *
import numpy as np
class Seq_Attention(BaseModel):
def __init__(
self,
transformer_state_path,
num_classes,
codes=True,
demographics=True,
demographics_size=0,
div_factor=2,
dropout=0.5,
):
super(Seq_Attention, self).__init__()
self.num_classes = num_classes
self.demographics = demographics
self.demographics_size = demographics_size
self.codes = codes
state_dict = torch.load(transformer_state_path)
transformer_config = state_dict["config"]
state_dict = state_dict["state_dict"]
transformer_args = transformer_config["model"]["args"]
self.transformer = MemTransformerLM(**transformer_args)
self.transformer.load_state_dict(state_dict)
self.transformer.eval()
self.patient_rep_size = self.transformer.d_embed * int(
self.codes
) + self.demographics_size * int(self.demographics)
self.predictor = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(
self.patient_rep_size,
self.patient_rep_size // div_factor,
),
nn.ReLU(inplace=True),
nn.Linear(self.patient_rep_size // div_factor, self.num_classes),
)
def forward(self, x, device="cuda"):
x_codes, x_cl, b_is, demo = x
x_codes = x_codes.to(device)
x_cl = x_cl.to(device)
demo = demo.to(device)
b_is = b_is.to(device)
#patient_rep = torch.Tensor([]).to(device)
patient_rep = None
with torch.no_grad():
mem_out = self.transformer._forward(x_codes)
mem_out = mem_out[x_cl, b_is, :]
if self.codes and self.demographics:
patient_rep = torch.cat((mem_out, demo), dim=1)
elif self.codes and not self.demographics:
patient_rep = mem_out
elif not self.codes and self.demographics:
patient_rep = demo
else:
raise ValueError("codes and demographics can be false at the same time")
# if self.demographics:
# if len(patient_rep.shape) == 0:
# patient_rep = demo
# else:
# if len(patient_rep.shape) == 1:
# patient_rep = patient_rep.unsqueeze(dim=0)
# patient_rep = torch.cat((patient_rep, demo), dim=1)
logits = self.predictor(patient_rep)
if self.num_classes > 1:
log_probs = F.log_softmax(logits, dim=1).squeeze()
else:
log_probs = torch.sigmoid(logits)
if len(logits) == 1:
logits = logits.squeeze(dim=0)
log_probs = log_probs.squeeze(dim=0)
else:
logits = logits.squeeze()
log_probs = log_probs.squeeze()
return log_probs, logits
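# Note: the pretrained MemTransformerLM is used here as a frozen feature extractor (its forward pass
# runs under torch.no_grad()), so gradients only flow through `self.predictor`.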
def __str__(self):
"""
Model prints with number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters if p is not None])
return "\nTrainable parameters: {}".format(params) | [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.Dropout",
"torch.cat",
"torch.no_grad",
"torch.nn.functional.log_softmax",
"torch.nn.ReLU",
"torch.load"
] | 1.2.0 | empyriumz/TAPER-EHR | fc89a27730a6eb6d4b321832e017c7e9662fa2e3 |
1.6 | from __future__ import print_function
from __future__ import division
import math
import torch
import torch.nn as nn
from torch.nn import Parameter
from torchkit.head.localfc.common import calc_logits
class CosFace(nn.Module):
""" Implement of CosFace (https://arxiv.org/abs/1801.09414)
"""
def __init__(self,
in_features,
out_features,
scale=64.0,
margin=0.40):
""" Args:
in_features: size of each input features
out_features: size of each output features
scale: norm of input feature
margin: margin
"""
super(CosFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.scale = scale
self.margin = margin
self.kernel = Parameter(torch.FloatTensor(in_features, out_features))
# nn.init.xavier_uniform_(self.kernel)
nn.init.normal_(self.kernel, std=0.01)
# init.kaiming_uniform_(self.kernel, a=math.sqrt(5))
def forward(self, embeddings, labels):
cos_theta, origin_cos = calc_logits(embeddings, self.kernel)
target_logit = cos_theta[torch.arange(0, embeddings.size(0)), labels].view(-1, 1)
final_target_logit = target_logit - self.margin
cos_theta.scatter_(1, labels.view(-1, 1).long(), final_target_logit)
output = cos_theta * self.scale
return output, origin_cos * self.scale
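# In formula form, with theta_j the angle between the normalized embedding and class weight w_j,
# the returned training logits are s * (cos(theta_y) - m) for the target class y and s * cos(theta_j)
# for every other class j (a restatement of the code above).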
| [
"torch.nn.init.normal_",
"torch.FloatTensor"
] | 1.6.0 | sarvex/TFace | 490cf90a1f042b86d7d03042f26d0a7cf6b1f0c0 |
1.1 | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li ([email protected])
# -----------------------------------------------------
import torch.nn as nn
from .builder import SPPE
from .layers.DUC import DUC
from .layers.SE_Resnet import SEResnet
@SPPE.register_module
class FastPose(nn.Module):
conv_dim = 128
def __init__(self, norm_layer=nn.BatchNorm2d, **cfg):
super(FastPose, self).__init__()
self._preset_cfg = cfg['PRESET']
if 'DCN' in cfg.keys():
stage_with_dcn = cfg['STAGE_WITH_DCN']
dcn = cfg['DCN']
self.preact = SEResnet(
f"resnet{cfg['NUM_LAYERS']}", dcn=dcn, stage_with_dcn=stage_with_dcn)
else:
self.preact = SEResnet(f"resnet{cfg['NUM_LAYERS']}")
# Imagenet pretrain model
import torchvision.models as tm # noqa: F401,F403
assert cfg['NUM_LAYERS'] in [18, 34, 50, 101, 152]
x = eval(f"tm.resnet{cfg['NUM_LAYERS']}(pretrained=True)")
model_state = self.preact.state_dict()
state = {k: v for k, v in x.state_dict().items()
if k in self.preact.state_dict() and v.size() == self.preact.state_dict()[k].size()}
model_state.update(state)
self.preact.load_state_dict(model_state)
self.suffle1 = nn.PixelShuffle(2)
self.duc1 = DUC(512, 1024, upscale_factor=2, norm_layer=norm_layer)
self.duc2 = DUC(256, 512, upscale_factor=2, norm_layer=norm_layer)
self.conv_out = nn.Conv2d(
self.conv_dim, self._preset_cfg['NUM_JOINTS'], kernel_size=3, stride=1, padding=1)
def forward(self, x):
out = self.preact(x)
out = self.suffle1(out)
out = self.duc1(out)
out = self.duc2(out)
out = self.conv_out(out)
return out
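# Rough shape bookkeeping for the ResNet-50/101/152 backbones (an informal note, not asserted by the
# code): the backbone downsamples by 32x, and PixelShuffle plus the two DUC blocks each upsample by 2x,
# so the predicted heatmaps have 1/4 of the input resolution (e.g. 256x192 input -> 64x48 heatmaps).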
def _initialize(self):
for m in self.conv_out.modules():
if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# logger.info('=> init {}.weight as normal(0, 0.001)'.format(name))
# logger.info('=> init {}.bias as 0'.format(name))
nn.init.normal_(m.weight, std=0.001)
nn.init.constant_(m.bias, 0)
| [
"torch.nn.PixelShuffle",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.nn.init.constant_"
] | 1.1.0 | ericwang0701/AlphaPose | 1f17dbf4b41ad7452430b69f72d58a0585ed09af |
1.1 | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li ([email protected])
# -----------------------------------------------------
import os
import sys
import numpy as np
import torch
import torch.nn.functional as F
from .transforms import get_max_pred_batch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
class DataLogger(object):
"""Average data logger."""
def __init__(self):
self.clear()
def clear(self):
self.value = 0
self.sum = 0
self.cnt = 0
self.avg = 0
def update(self, value, n=1):
self.value = value
self.sum += value * n
self.cnt += n
self._cal_avg()
def _cal_avg(self):
self.avg = self.sum / self.cnt
def calc_iou(pred, target):
"""Calculate mask iou"""
if isinstance(pred, torch.Tensor):
pred = pred.cpu().data.numpy()
if isinstance(target, torch.Tensor):
target = target.cpu().data.numpy()
pred = pred >= 0.5
target = target >= 0.5
intersect = (pred == target) * pred * target
union = np.maximum(pred, target)
if pred.ndim == 2:
iou = np.sum(intersect) / np.sum(union)
elif pred.ndim == 3 or pred.ndim == 4:
n_samples = pred.shape[0]
intersect = intersect.reshape(n_samples, -1)
union = union.reshape(n_samples, -1)
iou = np.mean(np.sum(intersect, axis=1) / np.sum(union, axis=1))
return iou
def mask_cross_entropy(pred, target):
return F.binary_cross_entropy_with_logits(
pred, target, reduction='mean')[None]
def evaluate_mAP(res_file, ann_type='bbox', ann_file='person_keypoints_val2017.json', silence=True):
"""Evaluate mAP result for coco dataset.
Parameters
----------
res_file: str
Path to result json file.
ann_type: str
annotation type, one of `bbox`, `segm`, `keypoints`.
ann_file: str
Path to the ground-truth annotation file.
silence: bool
If True, suppress the COCO evaluation log output.
"""
class NullWriter(object):
def write(self, arg):
pass
ann_file = os.path.join('./data/coco/annotations/', ann_file)
if silence:
nullwrite = NullWriter()
oldstdout = sys.stdout
sys.stdout = nullwrite # disable output
cocoGt = COCO(ann_file)
cocoDt = cocoGt.loadRes(res_file)
cocoEval = COCOeval(cocoGt, cocoDt, ann_type)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if silence:
sys.stdout = oldstdout # enable output
stats_names = ['AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)',
'AR', 'AR .5', 'AR .75', 'AR (M)', 'AR (L)']
info_str = {}
for ind, name in enumerate(stats_names):
info_str[name] = cocoEval.stats[ind]
return info_str
def calc_accuracy(preds, labels):
"""Calculate heatmap accuracy."""
preds = preds.cpu().data.numpy()
labels = labels.cpu().data.numpy()
num_joints = preds.shape[1]
norm = 1.0
hm_h = preds.shape[2]
hm_w = preds.shape[3]
preds, _ = get_max_pred_batch(preds)
labels, _ = get_max_pred_batch(labels)
norm = np.ones((preds.shape[0], 2)) * np.array([hm_w, hm_h]) / 10
dists = calc_dist(preds, labels, norm)
acc = 0
sum_acc = 0
cnt = 0
for i in range(num_joints):
acc = dist_acc(dists[i])
if acc >= 0:
sum_acc += acc
cnt += 1
if cnt > 0:
return sum_acc / cnt
else:
return 0
def calc_dist(preds, target, normalize):
"""Calculate normalized distances"""
preds = preds.astype(np.float32)
target = target.astype(np.float32)
dists = np.zeros((preds.shape[1], preds.shape[0]))
for n in range(preds.shape[0]):
for c in range(preds.shape[1]):
if target[n, c, 0] > 1 and target[n, c, 1] > 1:
normed_preds = preds[n, c, :] / normalize[n]
normed_targets = target[n, c, :] / normalize[n]
dists[c, n] = np.linalg.norm(normed_preds - normed_targets)
else:
dists[c, n] = -1
return dists
def dist_acc(dists, thr=0.5):
"""Calculate accuracy with given input distance."""
dist_cal = np.not_equal(dists, -1)
num_dist_cal = dist_cal.sum()
if num_dist_cal > 0:
return np.less(dists[dist_cal], thr).sum() * 1.0 / num_dist_cal
else:
return -1
| [
"torch.nn.functional.binary_cross_entropy_with_logits"
] | 1.1.0 | ericwang0701/AlphaPose | 1f17dbf4b41ad7452430b69f72d58a0585ed09af |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates.
import glob
import importlib
import logging
import os
import random
import sys
from datetime import datetime
import numpy as np
import torch
from omegaconf import OmegaConf, open_dict
def set_seed(seed):
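# A seed of -1 requests a pseudo-random seed (detectron2 convention) built
# from the pid, the current time and os.urandom; note that a seed of 0 is
# falsy and therefore leaves the RNGs unseeded.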
if seed:
if seed == -1:
# From detectron2
seed = (
os.getpid()
+ int(datetime.now().strftime("%S%f"))
+ int.from_bytes(os.urandom(2), "big")
)
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
return seed
def import_user_module(user_dir: str):
"""Given a user dir, this function imports it as a module.
This user_module is expected to have an __init__.py at its root.
You can use import_files to import your python files easily in
__init__.py
Args:
user_dir (str): directory which has to be imported
"""
from mmf.common.registry import registry
from mmf.utils.general import get_absolute_path # noqa
logger = logging.getLogger(__name__)
if user_dir:
if registry.get("__mmf_user_dir_imported__", no_warning=True):
logger.info(f"User dir {user_dir} already imported. Skipping.")
return
# Allow loading of files as user source
if user_dir.endswith(".py"):
user_dir = user_dir[:-3]
dot_path = ".".join(user_dir.split(os.path.sep))
# For an absolute path starting with "/", the dot path would begin with
# ".", turning it into a relative module name that find_spec doesn't accept
if os.path.isabs(user_dir):
dot_path = dot_path[1:]
try:
dot_spec = importlib.util.find_spec(dot_path)
except ModuleNotFoundError:
dot_spec = None
abs_user_dir = get_absolute_path(user_dir)
module_parent, module_name = os.path.split(abs_user_dir)
# If the dot path is already in sys.modules, or can be imported directly,
# we don't need to juggle the actual filesystem path
if dot_path in sys.modules or dot_spec is not None:
module_name = dot_path
else:
user_dir = abs_user_dir
logger.info(f"Importing from {user_dir}")
if module_name != dot_path:
# Since dot path hasn't been found or can't be imported,
# we can try importing the module by changing sys path
# to the parent
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
sys.modules["mmf_user_dir"] = sys.modules[module_name]
# Register config for user's model and dataset config
# relative path resolution
config = registry.get("config")
if config is None:
registry.register(
"config", OmegaConf.create({"env": {"user_dir": user_dir}})
)
else:
with open_dict(config):
config.env.user_dir = user_dir
registry.register("__mmf_user_dir_imported__", True)
def import_files(file_path: str, module_name: str = None):
"""The function imports all of the files present in file_path's directory.
This is useful for end user in case they want to easily import files without
mentioning each of them in their __init__.py. module_name if specified
is the full path to module under which all modules will be imported.
my_project/
my_models/
my_model.py
__init__.py
Contents of __init__.py
```
from mmf.utils.env import import_files
import_files(__file__, "my_project.my_models")
```
This will then allow you to import `my_project.my_models.my_model` anywhere.
Args:
file_path (str): Path to file in whose directory everything will be imported
module_name (str): Module name if this file under some specified structure
"""
for file in os.listdir(os.path.dirname(file_path)):
if file.endswith(".py") and not file.startswith("_"):
import_name = file[: file.find(".py")]
if module_name:
importlib.import_module(f"{module_name}.{import_name}")
else:
importlib.import_module(f"{import_name}")
def setup_imports():
from mmf.common.registry import registry
# First, check if imports are already setup
has_already_setup = registry.get("imports_setup", no_warning=True)
if has_already_setup:
return
# Automatically load all of the modules, so that
# they register with registry
root_folder = registry.get("mmf_root", no_warning=True)
if root_folder is None:
root_folder = os.path.dirname(os.path.abspath(__file__))
root_folder = os.path.join(root_folder, "..")
environment_mmf_path = os.environ.get("MMF_PATH", os.environ.get("PYTHIA_PATH"))
if environment_mmf_path is not None:
root_folder = environment_mmf_path
registry.register("pythia_path", root_folder)
registry.register("mmf_path", root_folder)
trainer_folder = os.path.join(root_folder, "trainers")
trainer_pattern = os.path.join(trainer_folder, "**", "*.py")
datasets_folder = os.path.join(root_folder, "datasets")
datasets_pattern = os.path.join(datasets_folder, "**", "*.py")
model_folder = os.path.join(root_folder, "models")
common_folder = os.path.join(root_folder, "common")
modules_folder = os.path.join(root_folder, "modules")
model_pattern = os.path.join(model_folder, "**", "*.py")
common_pattern = os.path.join(common_folder, "**", "*.py")
modules_pattern = os.path.join(modules_folder, "**", "*.py")
importlib.import_module("mmf.common.meter")
files = (
glob.glob(datasets_pattern, recursive=True)
+ glob.glob(model_pattern, recursive=True)
+ glob.glob(trainer_pattern, recursive=True)
+ glob.glob(common_pattern, recursive=True)
+ glob.glob(modules_pattern, recursive=True)
)
for f in files:
f = os.path.realpath(f)
if f.endswith(".py") and not f.endswith("__init__.py"):
splits = f.split(os.sep)
import_prefix_index = 0
for idx, split in enumerate(splits):
if split == "mmf":
import_prefix_index = idx + 1
file_name = splits[-1]
module_name = file_name[: file_name.find(".py")]
module = ".".join(["mmf"] + splits[import_prefix_index:-1] + [module_name])
importlib.import_module(module)
registry.register("imports_setup", True)
def setup_torchaudio():
# required for soundfile
try:
import libfb.py.ctypesmonkeypatch
libfb.py.ctypesmonkeypatch.install()
except ImportError:
pass
def teardown_imports():
from mmf.common.registry import registry
registry.unregister("pythia_path")
registry.unregister("mmf_path")
registry.unregister("imports_setup")
| [
"torch.manual_seed"
] | 1.6.0 | dk25021999/mmf | 218057265a3fc175f656b5ebe8fb44ef5ccca2e9 |
1.2 | from torch.distributions import Normal
import numpy as np
from torch import nn
from typing import Tuple
from tqdm.auto import trange
import torch
from deepqmc import Molecule
from deepqmc.pyscfext import eval_ao_normed, pyscf_from_mol
from deepqmc.wf import WaveFunction
from deepqmc.wf.paulinet.molorb import MolecularOrbital
from deepqmc.wf.paulinet.gto import GTOBasis
from deepqmc.physics import pairwise_diffs, local_energy
__all__ = ['Pretrainer']
class Pretrainer(nn.Module):
r""" Implements the FermiNet wave function Ansatz pretraining based on [pfau2020ab]
Provides tools for pretraining the Ansatz.
.. math:
Usage:
wf = FermiNet(mol, n_layers, nf_hidden_single, nf_hidden_pairwise, n_determinants).cuda()
pretrainer = Pretrainer(mol).cuda()
pretrainer.pretrain(wf)
Args:
mol (:class:`~deepqmc.Molecule`): molecule whose wave function is represented
basis (str): basis for the molecular orbitals
"""
def __init__(self,
mol,
basis: str = '6-311g',
device: str = 'cuda',
dtype: torch.dtype = torch.float32):
super(Pretrainer, self).__init__()
self.device = device
self.dtype = dtype
self.atom_positions = [x.cpu().numpy() for x in mol.coords.split(1, dim=0)]
self.ne_atoms = [int(i) for i in mol.charges]
self.mol = mol
self.n_elec = int(mol.charges)
self.n_up = (self.n_elec + mol.spin) // 2
self.n_down = (self.n_elec - mol.spin) // 2
self.n_atoms = len(self.mol)
self.n_orbitals = max(self.n_up, self.n_down) # if n_orbital is none return max of up or down
# cas and workdir set to None
self.mf, _ = pyscf_from_mol(mol, basis)
def compute_orbital_probability(self, samples: torch.Tensor) -> torch.Tensor:
up_dets, down_dets = self.hf_orbitals(samples)
spin_ups = up_dets ** 2
spin_downs = down_dets ** 2
p_up = torch.diagonal(spin_ups, dim1=-2, dim2=-1).prod(-1)
p_down = torch.diagonal(spin_downs, dim1=-2, dim2=-1).prod(-1)
probabilities = p_up * p_down
return probabilities.detach()
def hf_orbitals(self, samples: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
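# Evaluate the normalized PySCF atomic orbitals at the sampled electron
# positions, project onto the Hartree-Fock MO coefficients, and split the
# resulting orbital matrix into spin-up and spin-down blocks.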
n_samples = samples.shape[0]
# mol = Molecule.from_name('H2O')
# mf, _ = pyscf_from_mol(mol, '6-31g')
# rs = torch.randn(100, 10, 3).double()
# mo = (eval_ao_normed(mf.mol, rs.flatten(end_dim=1).numpy()).reshape(100, 10, -1) @ mf.mo_coeff[:, :5])
#
samples = samples.flatten(end_dim=1).cpu().numpy()
determinants = (eval_ao_normed(self.mf.mol, samples).reshape(n_samples, self.n_elec, -1)
@ self.mf.mo_coeff[:, :self.n_orbitals])
determinants = torch.from_numpy(determinants).to(device=self.device, dtype=self.dtype)
up_dets, down_dets = determinants.split([self.n_up, self.n_down], dim=1)
up_dets, down_dets = up_dets[:, :, :up_dets.shape[1]], down_dets[:, :, :down_dets.shape[1]]
return up_dets, down_dets
def pretrain(self,
wf: WaveFunction,
n_samples: int = 1024,
n_steps: int = 1000,
lr: float = 1e-4):
sampler = MetropolisHastingsPretrain()
opt = torch.optim.Adam(list(wf.parameters())[:-1], lr=lr)
steps = trange(
0, # init_step = 0
n_steps,
initial=0,
total=n_steps,
desc='pretraining',
disable=None,
)
samples = initialize_samples(self.ne_atoms, self.atom_positions, n_samples).to(device=self.device, dtype=self.dtype)
for step in steps:
Es_loc, _, _ = local_energy(
samples,
wf.sample(False),
create_graph=False,
keep_graph=False,
)
samples = sampler(wf, self, samples)
up_dets, down_dets = self.hf_orbitals(samples)
up_dets = tile_labels(up_dets, wf.n_determinants)
down_dets = tile_labels(down_dets, wf.n_determinants)
wf.pretraining = True
model_up_dets, model_down_dets = wf(samples)
wf.pretraining = False
loss = mse_error(up_dets, model_up_dets)
loss += mse_error(down_dets, model_down_dets)
opt.zero_grad()
loss.backward() # in order for hook to work must call backward
opt.step()
steps.set_postfix(E=f'{Es_loc.mean():.6f}')
# print('iteration: ', step, ' energy: ', Es_loc.mean())
def mse_error(targets: torch.Tensor, outputs: torch.Tensor) -> torch.Tensor:
return ((targets - outputs)**2).mean(0).sum()
def tile_labels(label: torch.Tensor, n_k: int) -> torch.Tensor:
x = label.unsqueeze(dim=1).repeat((1, n_k, 1, 1))
return x
class RandomWalker():
r""" Creates normal sampler with std of sigma
Used to suggest new updates to the positions of the walkers
Usage:
distr = RandomWalker(sigma.to(device=device, dtype=dtype))
Args:
sigma (float): step size of the walkers
"""
def __init__(self, sigma):
self.step_gaussian = Normal(0.0, sigma)
def resample(self, prev_sample) -> torch.Tensor:
return prev_sample + self.step_gaussian.sample(prev_sample.shape)
class Uniform(nn.Module):
r""" Creates a uniform sampler between 0 and 1
Used to determine whether moves accepted or rejected
Usage:
alpha_distr = Uniform(torch.tensor(0.).to(device=device, dtype=dtype), torch.tensor(1.).to(device=device, dtype=dtype))
"""
def __init__(self, low=0, high=1):
super(Uniform, self).__init__()
self.low = torch.tensor(low) if type(low) != torch.Tensor else low
self.high = torch.tensor(high) if type(high) != torch.Tensor else high
def forward(self, batch_size: int = 1):
return self.low + torch.rand(batch_size, device=self.low.device) * (self.high - self.low)
def sample(self, batch_size: int = 1):
return self(batch_size)
class ToProb(nn.Module):
def forward(self, amps: torch.Tensor) -> torch.Tensor:
return torch.exp(amps) ** 2
def initialize_samples(ne_atoms, atom_positions, n_samples):
r""" Initialises samples for pretraining
Usage:
samples = initialize_samples(ne_atoms, atom_positions, n_samples).to(device=self.device, dtype=self.dtype)
Args:
ne_atoms (list int): number of electrons assigned to each nucleus
atom_positions (list np.array): positions of the nuclei
n_samples (int): number of walkers
Returns:
samples (torch.Tensor): walker positions (n_samples, n_elec, 3)
"""
ups = []
downs = []
for ne_atom, atom_position in zip(ne_atoms, atom_positions):
for e in range(ne_atom):
if e % 2 == 0: # fill up the orbitals alternating up down
curr_sample_up = np.random.normal(loc=atom_position, scale=1., size=(n_samples, 1, 3))
ups.append(curr_sample_up)
else:
curr_sample_down = np.random.normal(loc=atom_position, scale=1., size=(n_samples, 1, 3))
downs.append(curr_sample_down)
ups = np.concatenate(ups, axis=1)
downs = np.concatenate(downs, axis=1)
curr_sample = np.concatenate([ups, downs], axis=1) # stack the ups first to be consistent with model
return torch.from_numpy(curr_sample)
class MetropolisHastingsPretrain(nn.Module):
r""" Implements MetropolisHastings sampling based on [pfau2020ab]
Samples configurations based on the amplitudes of both the Hartree-Fock orbitals and the wave function Ansatz
.. math:
Usage:
sampler = MetropolisHastingsPretrain()
Args:
sigma (float): step size for the walkers (std of the proposed moves)
correlation_length (int): number of steps between sampling each update of the walker positions
target_acceptance (float): the target acceptance of the steps
Returns:
curr_sample (torch.Tensor): walker configurations (n_samples, n_elec, 3)
"""
def __init__(self,
sigma: float = 0.02,
correlation_length: int = 10,
target_acceptance: float = 0.5,
device: str = 'cuda',
dtype: torch.dtype = torch.float32):
super(MetropolisHastingsPretrain, self).__init__()
self.device = device
self.dtype = dtype
self.sigma = sigma
self.correlation_length = correlation_length
self.distr = RandomWalker(sigma)
self.alpha_distr = Uniform(torch.tensor(0.).to(device=device, dtype=dtype), torch.tensor(1.).to(device=device, dtype=dtype))
self.to_prob = ToProb()
self.acceptance = 0.0
self.target_acceptance = target_acceptance
print('initialized pretraining sampler')
def forward(self, model, pretrainer, curr_sample):
n_samples = curr_sample.shape[0]
# --- split the walkers and sample half from the hf_orbitals and half from the wave function
sams = curr_sample.split([n_samples // 2, n_samples // 2])
curr_sample_model, curr_sample_hf = sams[0].squeeze(), sams[1].squeeze()
shape = curr_sample_model.shape
curr_log_amp = model(curr_sample_model)[0]
curr_prob_model = self.to_prob(curr_log_amp)
curr_prob_hf = pretrainer.compute_orbital_probability(curr_sample_hf)
acceptance_total_mod = 0.
acceptance_total_hf = 0.
for _ in range(self.correlation_length):
# --- next sample
new_sample_model = curr_sample_model + torch.normal(0.0, self.sigma, size=shape, device=self.device, dtype=self.dtype)
new_log_amp = model(new_sample_model)[0]
new_prob_model = self.to_prob(new_log_amp)
new_sample_hf = curr_sample_hf + torch.normal(0.0, self.sigma, size=shape, device=self.device)
new_prob_hf = pretrainer.compute_orbital_probability(new_sample_hf).to(self.device)
# --- update sample
alpha_model = new_prob_model / curr_prob_model
alpha_hf = new_prob_hf / curr_prob_hf
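# The Gaussian proposal is symmetric, so the Metropolis acceptance ratio
# reduces to the ratio of (squared-amplitude) probabilities; a move is
# accepted where alpha exceeds a uniform random draw.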
# --- generate masks
mask_model = alpha_model > torch.rand(shape[0], device=self.device, dtype=self.dtype)
mask_hf = alpha_hf > torch.rand(shape[0], device=self.device, dtype=self.dtype)
curr_sample_model = torch.where(mask_model.unsqueeze(-1).unsqueeze(-1), new_sample_model, curr_sample_model)
curr_prob_model = torch.where(mask_model, new_prob_model, curr_prob_model)
curr_sample_hf = torch.where(mask_hf.unsqueeze(-1).unsqueeze(-1), new_sample_hf, curr_sample_hf)
curr_prob_hf = torch.where(mask_hf, new_prob_hf, curr_prob_hf)
acceptance_total_mod += mask_model.type(self.dtype).mean()
acceptance_total_hf += mask_hf.type(self.dtype).mean()
curr_sample = torch.cat([curr_sample_model, curr_sample_hf], dim=0)
# --- randomly permute so some walkers in the next run sample from different distribution than in this run
idxs = torch.randperm(len(curr_sample))
curr_sample = curr_sample[idxs]
return curr_sample
def adjust_sampling_steps(self, acceptance):
if acceptance < 0.5:
self.sigma += 0.001
else:
self.sigma -= 0.001
"""
Below is a deprecated version of the pretrainer which works the same way
"""
class Pretrainer_dep(nn.Module):
r""" Implements the FermiNet wave function Ansatz pretraining based on [pfau2020ab]
Provides tools for pretraining the Ansatz.
.. math:
Usage:
wf = FermiNet(mol, n_layers, nf_hidden_single, nf_hidden_pairwise, n_determinants).cuda()
pretrainer = Pretrainer(mol).cuda()
pretrainer.pretrain(wf)
Args:
mol (:class:`~deepqmc.Molecule`): molecule whose wave function is represented
basis (str): basis for the molecular orbitals
"""
def __init__(self,
mol,
basis: str = '6-311g',
device: str = 'cuda',
dtype: torch.dtype = torch.float32):
super(Pretrainer_dep, self).__init__()
self.device = device
self.dtype = dtype
self.atom_positions = [x.cpu().numpy() for x in mol.coords.split(1, dim=0)]
self.ne_atoms = [int(i) for i in mol.charges]
self.mol = mol
self.n_elec = int(mol.charges)
self.n_up = (self.n_elec + mol.spin) // 2
self.n_down = (self.n_elec - mol.spin) // 2
self.n_atoms = len(self.mol)
self.n_orbitals = max(self.n_up, self.n_down) # if n_orbital is none return max of up or down
# cas and workdir set to None
mf, mc = pyscf_from_mol(mol, basis, None, None)
basis = GTOBasis.from_pyscf(mf.mol) # basis from molecule from name
mol = Molecule(
mf.mol.atom_coords(),
mf.mol.atom_charges(),
mf.mol.charge,
mf.mol.spin,
)
self.mo = MolecularOrbital( # create the molecular orbital
mol,
basis,
self.n_orbitals,
cusp_correction=False)
self.mo.init_from_pyscf(mf, freeze_mos=True)
self.coords = mol.coords.unsqueeze(0).to(device=device, dtype=dtype)
def compute_orbital_probability(self, samples: torch.Tensor) -> torch.Tensor:
up_dets, down_dets = self.hf_orbitals(samples)
spin_ups = up_dets ** 2
spin_downs = down_dets ** 2
p_up = torch.diagonal(spin_ups, dim1=-2, dim2=-1).prod(-1)
p_down = torch.diagonal(spin_downs, dim1=-2, dim2=-1).prod(-1)
probabilities = p_up * p_down
return probabilities.detach()
def hf_orbitals(self, samples: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
samples_hf = samples.view(-1, 1, 3).repeat(1, self.n_atoms, 1)
diffs_nuc = pairwise_diffs(torch.cat([self.coords, samples_hf]), self.coords).squeeze(1)
determinants = self.mo(diffs_nuc).unsqueeze(1).view(-1, self.n_elec, self.n_orbitals)
up_dets, down_dets = determinants.split([self.n_up, self.n_down], dim=1)
up_dets, down_dets = up_dets[:, :, :up_dets.shape[1]], down_dets[:, :, :down_dets.shape[1]]
return up_dets, down_dets
def pretrain(self,
wf: WaveFunction,
n_samples: int = 1024,
n_steps: int = 1000):
sampler = MetropolisHastingsPretrain()
opt = torch.optim.Adam(list(wf.parameters())[:-3], lr=0.0001)
steps = trange(
0, # init_step = 0
n_steps,
initial=0,
total=n_steps,
desc='pretraining',
disable=None,
)
samples = initialize_samples(self.ne_atoms, self.atom_positions, n_samples).to(device=self.device, dtype=self.dtype)
for step in steps:
Es_loc, _, _ = local_energy(
samples,
wf.sample(False),
create_graph=False,
keep_graph=False,
)
samples = sampler(wf, self, samples)
up_dets, down_dets = self.hf_orbitals(samples)
up_dets = tile_labels(up_dets, wf.n_determinants)
down_dets = tile_labels(down_dets, wf.n_determinants)
wf.pretraining = True
model_up_dets, model_down_dets = wf(samples)
wf.pretraining = False
loss = mse_error(up_dets, model_up_dets)
loss += mse_error(down_dets, model_down_dets)
wf.zero_grad()
loss.backward() # in order for hook to work must call backward
opt.step()
print('iteration: ', step, ' energy: ', Es_loc.mean())
| [
"torch.rand",
"torch.cat",
"torch.diagonal",
"torch.distributions.Normal",
"torch.normal",
"torch.from_numpy",
"torch.tensor",
"torch.exp",
"torch.where"
] | 1.2 | maxiwelian/deepqmc | 0d243e2c5f5964a79929294e62e46819653b137b |
0.4 | from dataset import get_dataloader
from common import config
from model import get_autoencoder
from functional.utils import cycle
from agent import get_training_agent
from functional.visualization import visulize_motion_in_training
import torch
import os
from collections import OrderedDict
from tqdm import tqdm
from tensorboardX import SummaryWriter
import argparse
torch.backends.cudnn.benchmark = True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', type=str, choices=['skeleton', 'view', 'full'], required=True,
help='which structure to use')
# parser.add_argument('-c', '--continue', dest='continue_path', type=str, required=False)
parser.add_argument('-g', '--gpu_ids', type=int, default=0, required=False, help="specify gpu ids")
parser.add_argument('--disable_triplet', action='store_true', default=False, help="disable triplet loss")
parser.add_argument('--use_footvel_loss', action='store_true', default=False, help="use footvel loss")
parser.add_argument('--vis', action='store_true', default=False, help="visualize output in training")
args = parser.parse_args()
config.initialize(args)
net = get_autoencoder(config)
print(net)
net = net.to(config.device)
# create tensorboard writer
train_tb = SummaryWriter(os.path.join(config.log_dir, 'train.events'))
val_tb = SummaryWriter(os.path.join(config.log_dir, 'val.events'))
# create dataloader
train_loader = get_dataloader('train', config, config.batch_size, config.num_workers)
mean_pose, std_pose = train_loader.dataset.mean_pose, train_loader.dataset.std_pose
val_loader = get_dataloader('test', config, config.batch_size, config.num_workers)
val_loader = cycle(val_loader)
# create training agent
tr_agent = get_training_agent(config, net)
clock = tr_agent.clock
# start training
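# Each epoch: iterate the training loader, log per-term losses to TensorBoard,
# optionally dump motion visualizations, run a validation batch every
# config.val_frequency steps, then update the LR schedule and save checkpoints.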
for e in range(config.nr_epochs):
# begin iteration
pbar = tqdm(train_loader)
for b, data in enumerate(pbar):
# train step
outputs, losses = tr_agent.train_func(data)
losses_values = {k:v.item() for k, v in losses.items()}
# record loss to tensorboard
for k, v in losses_values.items():
train_tb.add_scalar(k, v, clock.step)
# visualize
if args.vis and clock.step % config.visualize_frequency == 0:
imgs = visulize_motion_in_training(outputs, mean_pose, std_pose)
for k, img in imgs.items():
train_tb.add_image(k, torch.from_numpy(img), clock.step)
pbar.set_description("EPOCH[{}][{}/{}]".format(e, b, len(train_loader)))
pbar.set_postfix(OrderedDict({"loss": sum(losses_values.values())}))
# validation step
if clock.step % config.val_frequency == 0:
data = next(val_loader)
outputs, losses = tr_agent.val_func(data)
losses_values = {k: v.item() for k, v in losses.items()}
for k, v in losses_values.items():
val_tb.add_scalar(k, v, clock.step)
if args.vis and clock.step % config.visualize_frequency == 0:
imgs = visulize_motion_in_training(outputs, mean_pose, std_pose)
for k, img in imgs.items():
val_tb.add_image(k, torch.from_numpy(img), clock.step)
clock.tick()
train_tb.add_scalar('learning_rate', tr_agent.optimizer.param_groups[-1]['lr'], clock.epoch)
tr_agent.update_learning_rate()
if clock.epoch % config.save_frequency == 0:
tr_agent.save_network()
tr_agent.save_network('latest.pth.tar')
clock.tock()
if __name__ == '__main__':
main()
| [
"torch.from_numpy"
] | 0.4.1 | diegcr/2D-Motion-Retargeting | 2b4acedb45a281d2867c812fce6063dc68b8e88b |
1.8 | import torch
from .base_model import BaseModel
class Classifier(BaseModel):
def __init__(self, model, **kwargs):
super(Classifier, self).__init__(**kwargs)
self.model = model
self.model_name = self.model.name
if self.optimizer is not None:
self.optimizer = self.optimizer(self.parameters(), lr=self.lr)
self.set_optimizer_params()
if self.freeze:
for params in self.model.parameters():
params.requires_grad = False
if self.device:
self.model.to(self.device)
def forward(self, x):
return self.model(x)
def training_step(self, batch):
outputs = self.model(batch, self.device)
targets = batch['targets'].to(self.device)
loss = self.criterion(outputs, targets)
loss_dict = {'T': loss.item()}
return loss, loss_dict
def inference_step(self, batch, return_probs=False):
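# Class prediction via argmax over the logits; when return_probs is True the
# softmax confidence of the predicted class is returned alongside the labels.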
outputs = self.model(batch, self.device)
preds = torch.argmax(outputs, dim=1)
preds = preds.detach()
if return_probs:
probs = torch.nn.functional.softmax(outputs, dim=1)
probs, _ = torch.max(probs, dim=1)
return preds.cpu().numpy(), probs.cpu().numpy()
else:
return preds.cpu().numpy()
def evaluate_step(self, batch):
outputs = self.model(batch, self.device)
targets = batch['targets'].to(self.device)
loss = self.criterion(outputs, targets)
loss_dict = {'T': loss.item()}
self.update_metrics(outputs = outputs, targets = targets)
return loss, loss_dict | [
"torch.max",
"torch.argmax",
"torch.nn.functional.softmax"
] | 1.8.1 | elviva404/food-detection-yolov5 | 796a0c1df6e9c9a705dff7782b3f9b213344f11b |
1.10 | import argparse
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import torch
import torch.nn as nn
from models.rnn import IDP_test_rnn
from models.transformer import IDP_compare_Transformer
from idp_methods.utils import *
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--batch_size', type=int, default=4, help='batch size for training')
parser.add_argument('--dataset', type=str, default="d723", help='dataset name')
parser.add_argument('--epochs', type=int, default=50, help='total number of epochs')
parser.add_argument('--test-predictor', type=str, default='seg',
choices=['prediction-disorder-iupl', 'prediction-disorder-iups',
'prediction-disorder-espN', 'prediction-disorder-espX', 'prediction-disorder-espD',
'prediction-disorder-glo', 'cast', 'seg'])
args = parser.parse_args()
SEED = 42
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(SEED)
torch.cuda.manual_seed(SEED)
dataset = args.dataset
if dataset == 'd723':
train_dataset = np.load('./results/mobidb/d723_train2.npy',
allow_pickle=True).item()
val_dataset = np.load('./results/mobidb/d723_test2.npy', allow_pickle=True).item()
print(val_dataset['0'].keys())
predictors = ['prediction-disorder-iupl', 'prediction-disorder-iups',
'prediction-disorder-espN', 'prediction-disorder-espX', 'prediction-disorder-espD',
'prediction-disorder-glo', 'cast', 'seg']
elif dataset == 'mxd494':
train_dataset = np.load('./results/mobidb/mxd494_train_pred3.npy',
allow_pickle=True).item()
val_dataset = np.load('./results/mobidb/mxd494_val_pred3.npy', allow_pickle=True).item()
print(val_dataset['0'].keys())
predictors = ['prediction-disorder-iupl', 'prediction-disorder-iups',
'prediction-disorder-espN', 'prediction-disorder-espX', 'prediction-disorder-espD',
'prediction-disorder-glo', 'cast', 'seg','fldpnn']
test_predictor = args.test_predictor
predictors.remove(test_predictor)
train_predictors = predictors
assert len(train_predictors) == len(predictors)
def next_number(x, N=20):
if x % 20:
return x + (N - x % N)
else:
return 0
m = IDP_test_rnn(input_channels=len(train_predictors))
m = IDP_compare_Transformer(input_channels=len(train_predictors))
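# Note: this Transformer instance overwrites the RNN created on the previous
# line, so only the Transformer is actually trained below.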
train_X = []
trainY = []
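# For each sequence, stack the per-residue outputs of the held-in predictors
# as channels (shape: length x n_predictors); the held-out predictor's output
# is the per-residue target.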
for sample in train_dataset:
# print(train_dataset[sample].keys())
sample_data = torch.tensor([])
for preds_ in train_predictors:
data = torch.from_numpy(train_dataset[sample][preds_]).unsqueeze(0).float()
sample_data = torch.cat([sample_data, data])
train_X.append(sample_data.transpose(0, 1).float())
trainY.append(torch.from_numpy(train_dataset[sample][test_predictor]).unsqueeze(0).transpose(0, 1).float())
# print(torch.from_numpy(train_dataset[sample][test_predictor]).unsqueeze(0).shape,sample_data.shape)
val_X = []
valY = []
for sample in val_dataset:
# print(train_dataset[sample].keys())
sample_data = torch.tensor([])
for preds_ in train_predictors:
data = torch.from_numpy(val_dataset[sample][preds_]).unsqueeze(0).float()
sample_data = torch.cat([sample_data, data])
val_X.append(sample_data.transpose(0, 1).float())
valY.append(torch.from_numpy(val_dataset[sample][test_predictor]).unsqueeze(0).transpose(0, 1).float())
EPOCHS = args.epochs
optimizer = torch.optim.Adam(m.parameters(), lr=0.5e-3)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
m = m.to(device)
loss = nn.CrossEntropyLoss()
for i in range(EPOCHS):
train_loss = 0.0
val_loss = 0.0
yhat = []
y = []
for batchidx in range(len(train_X)):
sample = train_X[batchidx].to(device)
out = m(sample.unsqueeze(0)).squeeze(0)
target = trainY[batchidx].squeeze(-1).to(device)
# print(target.shape,out.shape)
loss_sclar = loss(out, target.long())
loss_sclar.backward()
optimizer.step()
optimizer.zero_grad()
train_loss += loss_sclar.item()
output = torch.softmax(out, dim=-1) # .squeeze()
# print(output.shape)
_, output = torch.max(output, 1)
# print(output.shape)
y += target.squeeze().detach().cpu().numpy().tolist()
yhat += output.tolist()
#print(f'EPOCH {i} Train Loss {train_loss / (batchidx + 1):.4f}')
metrics_, _ = dataset_metrics(yhat, y)
print(f'EPOCH {i} Train Loss {train_loss / (batchidx + 1):.4f}')
print(f'EPOCH TRAIN METRICS{i}\n{metrics_}')
train_loss = 0.0
val_loss = 0.0
yhat = []
y = []
for batchidx in range(len(val_X)):
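# Note: as written, this "validation" loop also calls backward() and
# optimizer.step(), so the model keeps being updated on the held-out data.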
sample = val_X[batchidx].to(device)
out = m(sample.unsqueeze(0)).squeeze(0)
target = valY[batchidx].squeeze(-1).to(device)
# print(target.shape,out.shape)
loss_sclar = loss(out, target.long())
loss_sclar.backward()
optimizer.step()
optimizer.zero_grad()
val_loss += loss_sclar.item()
output = torch.softmax(out, dim=-1) # .squeeze()
# print(output.shape)
_, output = torch.max(output, 1)
# print(output.shape)
y += target.squeeze().detach().cpu().numpy().tolist()
yhat += output.tolist()
metrics_, _ = dataset_metrics(yhat, y)
print(f'EPOCH {i} Val Loss {val_loss / (batchidx + 1):.4f}')
print(f'EPOCH VALIDATION METRICS {i}\n{metrics_}')
# print(out.shape,sample.shape)
| [
"torch.device",
"torch.cat",
"torch.cuda.manual_seed",
"torch.max",
"torch.softmax",
"torch.from_numpy",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.tensor",
"torch.nn.CrossEntropyLoss"
] | 1.10.0 | iliasprc/IDPMetagenome | 519cec77bb55eb91dbb7b243a2d80999742c033d |
1.6 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Multi-Task GP models.
References
.. [Doucet2010sampl]
A. Doucet. A Note on Efficient Conditional Simulation of Gaussian Distributions.
http://www.stats.ox.ac.uk/~doucet/doucet_simulationconditionalgaussian.pdf,
Apr 2010.
.. [Maddox2021bohdo]
W. Maddox, M. Balandat, A. Wilson, and E. Bakshy. Bayesian Optimization with
High-Dimensional Outputs. https://arxiv.org/abs/2106.12997, Jun 2021.
"""
from __future__ import annotations
import math
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from botorch.acquisition.objective import PosteriorTransform
from botorch.models.gp_regression import MIN_INFERRED_NOISE_LEVEL
from botorch.models.gpytorch import GPyTorchModel, MultiTaskGPyTorchModel
from botorch.models.transforms.input import InputTransform
from botorch.models.transforms.outcome import OutcomeTransform
from botorch.posteriors.multitask import MultitaskGPPosterior
from botorch.utils.containers import TrainingData
from gpytorch.constraints import GreaterThan
from gpytorch.distributions.multitask_multivariate_normal import (
MultitaskMultivariateNormal,
)
from gpytorch.distributions.multivariate_normal import MultivariateNormal
from gpytorch.kernels.index_kernel import IndexKernel
from gpytorch.kernels.matern_kernel import MaternKernel
from gpytorch.kernels.multitask_kernel import MultitaskKernel
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.lazy import (
BatchRepeatLazyTensor,
CatLazyTensor,
DiagLazyTensor,
KroneckerProductDiagLazyTensor,
KroneckerProductLazyTensor,
lazify,
RootLazyTensor,
)
from gpytorch.likelihoods.gaussian_likelihood import (
FixedNoiseGaussianLikelihood,
GaussianLikelihood,
)
from gpytorch.likelihoods.multitask_gaussian_likelihood import (
MultitaskGaussianLikelihood,
)
from gpytorch.means import MultitaskMean
from gpytorch.means.constant_mean import ConstantMean
from gpytorch.models.exact_gp import ExactGP
from gpytorch.module import Module
from gpytorch.priors.lkj_prior import LKJCovariancePrior
from gpytorch.priors.prior import Prior
from gpytorch.priors.smoothed_box_prior import SmoothedBoxPrior
from gpytorch.priors.torch_priors import GammaPrior
from gpytorch.settings import detach_test_caches
from gpytorch.utils.errors import CachingError
from gpytorch.utils.memoize import cached, pop_from_cache
from torch import Tensor
class MultiTaskGP(ExactGP, MultiTaskGPyTorchModel):
r"""Multi-Task GP model using an ICM kernel, inferring observation noise.
Multi-task exact GP that uses a simple ICM kernel. Can be single-output or
multi-output. This model uses relatively strong priors on the base Kernel
hyperparameters, which work best when covariates are normalized to the unit
cube and outcomes are standardized (zero mean, unit variance).
This model infers the noise level. WARNING: It currently does not support
different noise levels for the different tasks. If you have known observation
noise, please use `FixedNoiseMultiTaskGP` instead.
"""
def __init__(
self,
train_X: Tensor,
train_Y: Tensor,
task_feature: int,
covar_module: Optional[Module] = None,
task_covar_prior: Optional[Prior] = None,
output_tasks: Optional[List[int]] = None,
rank: Optional[int] = None,
input_transform: Optional[InputTransform] = None,
outcome_transform: Optional[OutcomeTransform] = None,
) -> None:
r"""Multi-Task GP model using an ICM kernel, inferring observation noise.
Args:
train_X: A `n x (d + 1)` or `b x n x (d + 1)` (batch mode) tensor
of training data. One of the columns should contain the task
features (see `task_feature` argument).
train_Y: A `n x 1` or `b x n x 1` (batch mode) tensor of training
observations.
task_feature: The index of the task feature (`-d <= task_feature <= d`).
output_tasks: A list of task indices for which to compute model
outputs for. If omitted, return outputs for all task indices.
rank: The rank to be used for the index kernel. If omitted, use a
full rank (i.e. number of tasks) kernel.
task_covar_prior : A Prior on the task covariance matrix. Must operate
on p.s.d. matrices. A common prior for this is the `LKJ` prior.
input_transform: An input transform that is applied in the model's
forward pass.
Example:
>>> X1, X2 = torch.rand(10, 2), torch.rand(20, 2)
>>> i1, i2 = torch.zeros(10, 1), torch.ones(20, 1)
>>> train_X = torch.cat([
>>> torch.cat([X1, i1], -1), torch.cat([X2, i2], -1),
>>> ])
>>> train_Y = torch.cat(f1(X1), f2(X2)).unsqueeze(-1)
>>> model = MultiTaskGP(train_X, train_Y, task_feature=-1)
"""
with torch.no_grad():
transformed_X = self.transform_inputs(
X=train_X, input_transform=input_transform
)
self._validate_tensor_args(X=transformed_X, Y=train_Y)
all_tasks, task_feature, d = self.get_all_tasks(
transformed_X, task_feature, output_tasks
)
if outcome_transform is not None:
train_Y, _ = outcome_transform(train_Y)
# squeeze output dim
train_Y = train_Y.squeeze(-1)
if output_tasks is None:
output_tasks = all_tasks
else:
if set(output_tasks) - set(all_tasks):
raise RuntimeError("All output tasks must be present in input data.")
self._output_tasks = output_tasks
self._num_outputs = len(output_tasks)
# TODO (T41270962): Support task-specific noise levels in likelihood
likelihood = GaussianLikelihood(noise_prior=GammaPrior(1.1, 0.05))
# construct indexer to be used in forward
self._task_feature = task_feature
self._base_idxr = torch.arange(d)
self._base_idxr[task_feature:] += 1 # exclude task feature
super().__init__(
train_inputs=train_X, train_targets=train_Y, likelihood=likelihood
)
self.mean_module = ConstantMean()
if covar_module is None:
self.covar_module = ScaleKernel(
base_kernel=MaternKernel(
nu=2.5, ard_num_dims=d, lengthscale_prior=GammaPrior(3.0, 6.0)
),
outputscale_prior=GammaPrior(2.0, 0.15),
)
else:
self.covar_module = covar_module
num_tasks = len(all_tasks)
self._rank = rank if rank is not None else num_tasks
self.task_covar_module = IndexKernel(
num_tasks=num_tasks, rank=self._rank, prior=task_covar_prior
)
if input_transform is not None:
self.input_transform = input_transform
if outcome_transform is not None:
self.outcome_transform = outcome_transform
self.to(train_X)
def _split_inputs(self, x: Tensor) -> Tuple[Tensor, Tensor]:
r"""Extracts base features and task indices from input data.
Args:
x: The full input tensor with trailing dimension of size `d + 1`.
Should be of float/double data type.
Returns:
2-element tuple containing
- A `q x d` or `b x q x d` (batch mode) tensor with trailing
dimension made up of the `d` non-task-index columns of `x`, arranged
in the order as specified by the indexer generated during model
instantiation.
- A `q` or `b x q` (batch mode) tensor of long data type containing
the task indices.
"""
batch_shape, d = x.shape[:-2], x.shape[-1]
x_basic = x[..., self._base_idxr].view(batch_shape + torch.Size([-1, d - 1]))
task_idcs = (
x[..., self._task_feature]
.view(batch_shape + torch.Size([-1, 1]))
.to(dtype=torch.long)
)
return x_basic, task_idcs
def forward(self, x: Tensor) -> MultivariateNormal:
if self.training:
x = self.transform_inputs(x)
x_basic, task_idcs = self._split_inputs(x)
# Compute base mean and covariance
mean_x = self.mean_module(x_basic)
covar_x = self.covar_module(x_basic)
# Compute task covariances
covar_i = self.task_covar_module(task_idcs)
# Combine the two in an ICM fashion
covar = covar_x.mul(covar_i)
return MultivariateNormal(mean_x, covar)
@classmethod
def get_all_tasks(
cls,
train_X: Tensor,
task_feature: int,
output_tasks: Optional[List[int]] = None,
) -> Tuple[List[int], int, int]:
if train_X.ndim != 2:
# Currently, batch mode MTGPs are blocked upstream in GPyTorch
raise ValueError(f"Unsupported shape {train_X.shape} for train_X.")
d = train_X.shape[-1] - 1
if not (-d <= task_feature <= d):
raise ValueError(f"Must have that -{d} <= task_feature <= {d}")
task_feature = task_feature % (d + 1)
all_tasks = train_X[:, task_feature].unique().to(dtype=torch.long).tolist()
return all_tasks, task_feature, d
@classmethod
def construct_inputs(cls, training_data: TrainingData, **kwargs) -> Dict[str, Any]:
r"""Construct kwargs for the `Model` from `TrainingData` and other options.
Args:
training_data: `TrainingData` container with data for single outcome
or for multiple outcomes for batched multi-output case.
**kwargs: Additional options for the model that pertain to the
training data, including:
- `task_features`: Indices of the input columns containing the task
features (expected list of length 1),
- `task_covar_prior`: A GPyTorch `Prior` object to use as prior on
the cross-task covariance matrix,
- `prior_config`: A dict representing a prior config, should only be
used if `prior` is not passed directly. Should contain:
`use_LKJ_prior` (whether to use LKJ prior) and `eta` (eta value,
float),
- `rank`: The rank of the cross-task covariance matrix.
"""
task_features = kwargs.pop("task_features", None)
if task_features is None:
raise ValueError(f"`task_features` required for {cls.__name__}.")
task_feature = task_features[0]
inputs = {
"train_X": training_data.X,
"train_Y": training_data.Y,
"task_feature": task_feature,
"rank": kwargs.get("rank"),
}
prior = kwargs.get("task_covar_prior")
prior_config = kwargs.get("prior_config")
if prior and prior_config:
raise ValueError(
"Only one of `prior` and `prior_config` arguments expected."
)
if prior_config:
if not prior_config.get("use_LKJ_prior"):
raise ValueError("Currently only config for LKJ prior is supported.")
all_tasks, _, _ = MultiTaskGP.get_all_tasks(training_data.X, task_feature)
num_tasks = len(all_tasks)
sd_prior = GammaPrior(1.0, 0.15)
sd_prior._event_shape = torch.Size([num_tasks])
eta = prior_config.get("eta", 0.5)
if not isinstance(eta, float) and not isinstance(eta, int):
raise ValueError(f"eta must be a real number, your eta was {eta}.")
prior = LKJCovariancePrior(num_tasks, eta, sd_prior)
inputs["task_covar_prior"] = prior
return inputs
class FixedNoiseMultiTaskGP(MultiTaskGP):
r"""Multi-Task GP model using an ICM kernel, with known observation noise.
Multi-task exact GP that uses a simple ICM kernel. Can be single-output or
multi-output. This model uses relatively strong priors on the base Kernel
hyperparameters, which work best when covariates are normalized to the unit
cube and outcomes are standardized (zero mean, unit variance).
This model requires observation noise data (specified in `train_Yvar`).
"""
def __init__(
self,
train_X: Tensor,
train_Y: Tensor,
train_Yvar: Tensor,
task_feature: int,
covar_module: Optional[Module] = None,
task_covar_prior: Optional[Prior] = None,
output_tasks: Optional[List[int]] = None,
rank: Optional[int] = None,
input_transform: Optional[InputTransform] = None,
) -> None:
r"""Multi-Task GP model using an ICM kernel and known observation noise.
Args:
train_X: A `n x (d + 1)` or `b x n x (d + 1)` (batch mode) tensor
of training data. One of the columns should contain the task
features (see `task_feature` argument).
train_Y: A `n x 1` or `b x n x 1` (batch mode) tensor of training
observations.
train_Yvar: A `n` or `b x n` (batch mode) tensor of observation
noise standard errors.
task_feature: The index of the task feature (`-d <= task_feature <= d`).
task_covar_prior : A Prior on the task covariance matrix. Must operate
on p.s.d. matrices. A common prior for this is the `LKJ` prior.
output_tasks: A list of task indices for which to compute model
outputs for. If omitted, return outputs for all task indices.
rank: The rank to be used for the index kernel. If omitted, use a
full rank (i.e. number of tasks) kernel.
input_transform: An input transform that is applied in the model's
forward pass.
Example:
>>> X1, X2 = torch.rand(10, 2), torch.rand(20, 2)
>>> i1, i2 = torch.zeros(10, 1), torch.ones(20, 1)
>>> train_X = torch.cat([
>>> torch.cat([X1, i1], -1), torch.cat([X2, i2], -1),
>>> ], dim=0)
>>> train_Y = torch.cat(f1(X1), f2(X2))
>>> train_Yvar = 0.1 + 0.1 * torch.rand_like(train_Y)
>>> model = FixedNoiseMultiTaskGP(train_X, train_Y, train_Yvar, -1)
"""
with torch.no_grad():
transformed_X = self.transform_inputs(
X=train_X, input_transform=input_transform
)
self._validate_tensor_args(X=transformed_X, Y=train_Y, Yvar=train_Yvar)
# We'll instantiate a MultiTaskGP and simply override the likelihood
super().__init__(
train_X=train_X,
train_Y=train_Y,
covar_module=covar_module,
task_feature=task_feature,
output_tasks=output_tasks,
rank=rank,
task_covar_prior=task_covar_prior,
input_transform=input_transform,
)
self.likelihood = FixedNoiseGaussianLikelihood(noise=train_Yvar.squeeze(-1))
self.to(train_X)
@classmethod
def construct_inputs(cls, training_data: TrainingData, **kwargs) -> Dict[str, Any]:
r"""Construct kwargs for the `Model` from `TrainingData` and other options.
Args:
training_data: `TrainingData` container with data for single outcome
or for multiple outcomes for batched multi-output case.
**kwargs: Additional options for the model that pertain to the
training data, including:
- `task_features`: Indices of the input columns containing the task
features (expected list of length 1),
- `task_covar_prior`: A GPyTorch `Prior` object to use as prior on
the cross-task covariance matrix,
- `prior_config`: A dict representing a prior config, should only be
used if `prior` is not passed directly. Should contain:
`use_LKJ_prior` (whether to use LKJ prior) and `eta` (eta value,
float),
- `rank`: The rank of the cross-task covariance matrix.
"""
if training_data.Yvar is None:
raise ValueError(f"Yvar required for {cls.__name__}.")
inputs = super().construct_inputs(training_data=training_data, **kwargs)
inputs["train_Yvar"] = training_data.Yvar
return inputs
class KroneckerMultiTaskGP(ExactGP, GPyTorchModel):
"""Multi-task GP with Kronecker structure, using an ICM kernel.
This model assumes the "block design" case, i.e., it requires that all tasks
are observed at all data points.
For posterior sampling, this model uses Matheron's rule [Doucet2010sampl] to compute
the posterior over all tasks as in [Maddox2021bohdo] by exploiting Kronecker
structure.
"""
def __init__(
self,
train_X: Tensor,
train_Y: Tensor,
likelihood: Optional[MultitaskGaussianLikelihood] = None,
data_covar_module: Optional[Module] = None,
task_covar_prior: Optional[Prior] = None,
rank: Optional[int] = None,
input_transform: Optional[InputTransform] = None,
outcome_transform: Optional[OutcomeTransform] = None,
**kwargs: Any,
) -> None:
r"""Multi-task GP with Kronecker structure, using a simple ICM kernel.
Args:
train_X: A `batch_shape x n x d` tensor of training features.
train_Y: A `batch_shape x n x m` tensor of training observations.
likelihood: A `MultitaskGaussianLikelihood`. If omitted, uses a
`MultitaskGaussianLikelihood` with a `GammaPrior(1.1, 0.05)`
noise prior.
data_covar_module: The module computing the covariance (Kernel) matrix
in data space. If omitted, use a `MaternKernel`.
task_covar_prior : A Prior on the task covariance matrix. Must operate
on p.s.d. matrices. A common prior for this is the `LKJ` prior. If
omitted, uses `LKJCovariancePrior` with `eta` parameter as specified
in the keyword arguments (if not specified, use `eta=1.5`).
rank: The rank of the ICM kernel. If omitted, use a full rank kernel.
kwargs: Additional arguments to override default settings of priors,
including:
- eta: The eta parameter on the default LKJ task_covar_prior.
A value of 1.0 is uninformative, values <1.0 favor stronger
correlations (in magnitude), correlations vanish as eta -> inf.
- sd_prior: A scalar prior over nonnegative numbers, which is used
for the default LKJCovariancePrior task_covar_prior.
- likelihood_rank: The rank of the task covariance matrix to fit.
Defaults to 0 (which corresponds to a diagonal covariance matrix).
Example:
>>> train_X = torch.rand(10, 2)
>>> train_Y = torch.cat([f_1(X), f_2(X)], dim=-1)
>>> model = KroneckerMultiTaskGP(train_X, train_Y)
"""
with torch.no_grad():
transformed_X = self.transform_inputs(
X=train_X, input_transform=input_transform
)
if outcome_transform is not None:
train_Y, _ = outcome_transform(train_Y)
self._validate_tensor_args(X=transformed_X, Y=train_Y)
self._num_outputs = train_Y.shape[-1]
batch_shape, ard_num_dims = train_X.shape[:-2], train_X.shape[-1]
num_tasks = train_Y.shape[-1]
if rank is None:
rank = num_tasks
if likelihood is None:
noise_prior = GammaPrior(1.1, 0.05)
noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
likelihood = MultitaskGaussianLikelihood(
num_tasks=num_tasks,
batch_shape=batch_shape,
noise_prior=noise_prior,
noise_constraint=GreaterThan(
MIN_INFERRED_NOISE_LEVEL,
transform=None,
initial_value=noise_prior_mode,
),
rank=kwargs.get("likelihood_rank", 0),
)
if task_covar_prior is None:
task_covar_prior = LKJCovariancePrior(
n=num_tasks,
eta=kwargs.get("eta", 1.5),
sd_prior=kwargs.get(
"sd_prior",
SmoothedBoxPrior(math.exp(-6), math.exp(1.25), 0.05),
),
)
super().__init__(train_X, train_Y, likelihood)
self.mean_module = MultitaskMean(
base_means=ConstantMean(batch_shape=batch_shape), num_tasks=num_tasks
)
if data_covar_module is None:
data_covar_module = MaternKernel(
nu=2.5,
ard_num_dims=ard_num_dims,
lengthscale_prior=GammaPrior(3.0, 6.0),
batch_shape=batch_shape,
)
else:
data_covar_module = data_covar_module
self.covar_module = MultitaskKernel(
data_covar_module=data_covar_module,
num_tasks=num_tasks,
rank=rank,
batch_shape=batch_shape,
task_covar_prior=task_covar_prior,
)
if outcome_transform is not None:
self.outcome_transform = outcome_transform
if input_transform is not None:
self.input_transform = input_transform
self.to(train_X)
def forward(self, X: Tensor) -> MultitaskMultivariateNormal:
if self.training:
X = self.transform_inputs(X)
mean_x = self.mean_module(X)
covar_x = self.covar_module(X)
return MultitaskMultivariateNormal(mean_x, covar_x)
@property
def _task_covar_matrix(self):
res = self.covar_module.task_covar_module.covar_matrix
if detach_test_caches.on():
res = res.detach()
return res
@property
@cached(name="train_full_covar")
def train_full_covar(self):
train_x = self.transform_inputs(self.train_inputs[0])
# construct Kxx \otimes Ktt
train_full_covar = self.covar_module(train_x).evaluate_kernel()
if detach_test_caches.on():
train_full_covar = train_full_covar.detach()
return train_full_covar
@property
@cached(name="predictive_mean_cache")
def predictive_mean_cache(self):
train_x = self.transform_inputs(self.train_inputs[0])
train_noise = self.likelihood._shaped_noise_covar(train_x.shape)
if detach_test_caches.on():
train_noise = train_noise.detach()
train_diff = self.train_targets - self.mean_module(train_x)
train_solve = (self.train_full_covar + train_noise).inv_matmul(
train_diff.reshape(*train_diff.shape[:-2], -1)
)
if detach_test_caches.on():
train_solve = train_solve.detach()
return train_solve
def posterior(
self,
X: Tensor,
output_indices: Optional[List[int]] = None,
observation_noise: Union[bool, Tensor] = False,
posterior_transform: Optional[PosteriorTransform] = None,
**kwargs: Any,
) -> MultitaskGPPosterior:
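# High-level plan: build a Kronecker-structured root decomposition of the
# joint train/test covariance, compute the predictive mean from the cached
# train solve, assemble the diagonal predictive variance from the data- and
# task-covariance eigendecompositions, and return a MultitaskGPPosterior
# whose sampling exploits Matheron's rule.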
self.eval()
if posterior_transform is not None:
# this could be very costly, disallow for now
raise NotImplementedError(
"Posterior transforms currently not supported for "
f"{self.__class__.__name__}"
)
X = self.transform_inputs(X)
train_x = self.transform_inputs(self.train_inputs[0])
# construct Ktt
task_covar = self._task_covar_matrix
task_rootlt = self._task_covar_matrix.root_decomposition(
method="diagonalization"
)
task_root = task_rootlt.root
if task_covar.batch_shape != X.shape[:-2]:
task_covar = BatchRepeatLazyTensor(task_covar, batch_repeat=X.shape[:-2])
task_root = BatchRepeatLazyTensor(
lazify(task_root), batch_repeat=X.shape[:-2]
)
task_covar_rootlt = RootLazyTensor(task_root)
# construct RR' \approx Kxx
data_data_covar = self.train_full_covar.lazy_tensors[0]
# populate the diagonalization caches for the root and inverse root
# decomposition
data_data_evals, data_data_evecs = data_data_covar.diagonalization()
# pad the eigenvalues and eigenvectors with zeros if we are using Lanczos
if data_data_evecs.shape[-1] < data_data_evecs.shape[-2]:
cols_to_add = data_data_evecs.shape[-2] - data_data_evecs.shape[-1]
zero_evecs = torch.zeros(
*data_data_evecs.shape[:-1],
cols_to_add,
dtype=data_data_evals.dtype,
device=data_data_evals.device,
)
zero_evals = torch.zeros(
*data_data_evecs.shape[:-2],
cols_to_add,
dtype=data_data_evals.dtype,
device=data_data_evals.device,
)
data_data_evecs = CatLazyTensor(
data_data_evecs,
lazify(zero_evecs),
dim=-1,
output_device=data_data_evals.device,
)
data_data_evals = torch.cat((data_data_evals, zero_evals), dim=-1)
# construct K_{xt, x}
test_data_covar = self.covar_module.data_covar_module(X, train_x)
# construct K_{xt, xt}
test_test_covar = self.covar_module.data_covar_module(X)
# now update root so that \tilde{R}\tilde{R}' \approx K_{(x,xt), (x,xt)}
# cloning preserves the gradient history
updated_lazy_tensor = data_data_covar.cat_rows(
cross_mat=test_data_covar.clone(),
new_mat=test_test_covar,
method="diagonalization",
)
updated_root = updated_lazy_tensor.root_decomposition().root
# occasionally there are device errors, so make sure the root ends up on the right device
updated_root = updated_root.to(data_data_covar.device)
# build a root decomposition of the joint train/test covariance matrix
# construct (\tilde{R} \otimes M)(\tilde{R} \otimes M)' \approx
# (K_{(x,xt), (x,xt)} \otimes Ktt)
joint_covar = RootLazyTensor(
KroneckerProductLazyTensor(updated_root, task_covar_rootlt.root.detach())
)
# construct K_{xt, x} \otimes Ktt
test_obs_kernel = KroneckerProductLazyTensor(test_data_covar, task_covar)
# collect y - \mu(x) and \mu(X)
train_diff = self.train_targets - self.mean_module(train_x)
if detach_test_caches.on():
train_diff = train_diff.detach()
test_mean = self.mean_module(X)
train_noise = self.likelihood._shaped_noise_covar(train_x.shape)
diagonal_noise = isinstance(train_noise, DiagLazyTensor)
if detach_test_caches.on():
train_noise = train_noise.detach()
test_noise = (
self.likelihood._shaped_noise_covar(X.shape) if observation_noise else None
)
# predictive mean and variance for the mvn
# first the predictive mean
pred_mean = (
test_obs_kernel.matmul(self.predictive_mean_cache).reshape_as(test_mean)
+ test_mean
)
# next the predictive variance, assume diagonal noise
test_var_term = KroneckerProductLazyTensor(test_test_covar, task_covar).diag()
if diagonal_noise:
task_evals, task_evecs = self._task_covar_matrix.diagonalization()
# TODO: make this be the default KPMatmulLT diagonal method in gpytorch
full_data_inv_evals = (
KroneckerProductDiagLazyTensor(
DiagLazyTensor(data_data_evals), DiagLazyTensor(task_evals)
)
+ train_noise
).inverse()
test_train_hadamard = KroneckerProductLazyTensor(
test_data_covar.matmul(data_data_evecs).evaluate() ** 2,
task_covar.matmul(task_evecs).evaluate() ** 2,
)
data_var_term = test_train_hadamard.matmul(full_data_inv_evals).sum(dim=-1)
else:
# if non-diagonal noise (but still kronecker structured), we have to pull
# across the noise because the inverse is not closed form
# should be a kronecker lt, R = \Sigma_X^{-1/2} \kron \Sigma_T^{-1/2}
# TODO: enforce the diagonalization to return a KPLT for all shapes in
# gpytorch or dense linear algebra for small shapes
data_noise, task_noise = train_noise.lazy_tensors
data_noise_root = data_noise.root_inv_decomposition(
method="diagonalization"
)
task_noise_root = task_noise.root_inv_decomposition(
method="diagonalization"
)
# ultimately we need to compute the diagonal of
# (K_{x* X} \kron K_T)(K_{XX} \kron K_T + \Sigma_X \kron \Sigma_T)^{-1}
# (K_{x* X} \kron K_T)^T
# = (K_{x* X} \Sigma_X^{-1/2} Q_R)(\Lambda_R + I)^{-1}
# (K_{x* X} \Sigma_X^{-1/2} Q_R)^T
# where R = (\Sigma_X^{-1/2T}K_{XX}\Sigma_X^{-1/2} \kron
# \Sigma_T^{-1/2T}K_{T}\Sigma_T^{-1/2})
# first we construct the components of R's eigen-decomposition
# TODO: make this be the default KPMatmulLT diagonal method in gpytorch
whitened_data_covar = (
data_noise_root.transpose(-1, -2)
.matmul(data_data_covar)
.matmul(data_noise_root)
)
w_data_evals, w_data_evecs = whitened_data_covar.diagonalization()
whitened_task_covar = (
task_noise_root.transpose(-1, -2)
.matmul(self._task_covar_matrix)
.matmul(task_noise_root)
)
w_task_evals, w_task_evecs = whitened_task_covar.diagonalization()
# we add one to the eigenvalues as above (not just for stability)
full_data_inv_evals = (
KroneckerProductDiagLazyTensor(
DiagLazyTensor(w_data_evals), DiagLazyTensor(w_task_evals)
)
.add_jitter(1.0)
.inverse()
)
test_data_comp = (
test_data_covar.matmul(data_noise_root).matmul(w_data_evecs).evaluate()
** 2
)
task_comp = (
task_covar.matmul(task_noise_root).matmul(w_task_evecs).evaluate() ** 2
)
test_train_hadamard = KroneckerProductLazyTensor(test_data_comp, task_comp)
data_var_term = test_train_hadamard.matmul(full_data_inv_evals).sum(dim=-1)
pred_variance = test_var_term - data_var_term
specialized_mvn = MultitaskMultivariateNormal(
pred_mean, DiagLazyTensor(pred_variance)
)
if observation_noise:
specialized_mvn = self.likelihood(specialized_mvn)
posterior = MultitaskGPPosterior(
mvn=specialized_mvn,
joint_covariance_matrix=joint_covar,
test_train_covar=test_obs_kernel,
train_diff=train_diff,
test_mean=test_mean,
train_train_covar=self.train_full_covar,
train_noise=train_noise,
test_noise=test_noise,
)
if hasattr(self, "outcome_transform"):
posterior = self.outcome_transform.untransform_posterior(posterior)
return posterior
def train(self, val=True, *args, **kwargs):
if val:
fixed_cache_names = ["data_data_roots", "train_full_covar", "task_root"]
for name in fixed_cache_names:
try:
pop_from_cache(self, name)
except CachingError:
pass
return super().train(val, *args, **kwargs)
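# The predictive-variance computation above relies on the identity
# diag(M D M^T) = (A**2 kron B**2) @ diag(D) for M = A kron B and diagonal D.
# A minimal sketch checking that identity on dense toy tensors (sizes and values below are
# made up for illustration; torch.kron requires a reasonably recent PyTorch, >= 1.8):
def _sketch_kronecker_diagonal_identity():
    import torch
    A = torch.randn(3, 4)            # stands in for K_{x* X} Q_X
    B = torch.randn(2, 2)            # stands in for K_T Q_T
    d = torch.rand(4 * 2) + 0.1      # stands in for the inverse eigenvalues
    M = torch.kron(A, B)
    dense_diag = torch.diagonal(M @ torch.diag(d) @ M.t())
    hadamard_diag = torch.kron(A ** 2, B ** 2) @ d
    assert torch.allclose(dense_diag, hadamard_diag, atol=1e-5)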
| [
"torch.Size",
"torch.zeros",
"torch.cat",
"torch.arange",
"torch.no_grad"
] | 1.6 | CompRhys/botorch | 6965426853b7c2d61244f6874eff3317b3588554 |
1.8 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
import math
import torch
import torch.utils.checkpoint
import model.lora as lora
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN, gelu
from transformers.adapters.model_mixin import ModelWithHeadsAdaptersMixin
from transformers.adapters.models.bert import (
BertEncoderAdaptersMixin,
BertLayerAdaptersMixin,
BertModelAdaptersMixin,
BertModelHeadsMixin,
BertOutputAdaptersMixin,
BertSelfOutputAdaptersMixin,
)
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from .configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"roberta-large",
"roberta-large-mnli",
"distilroberta-base",
"roberta-base-openai-detector",
"roberta-large-openai-detector",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class RobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
persistent=False,
)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# If token_type_ids is not provided, fall back to the all-zeros buffer registered in the
# constructor. The registered buffer lets users trace the model without passing
# token_type_ids and resolves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
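# The `create_position_ids_from_input_ids` helper referenced above (defined elsewhere in this
# file) produces position ids that start at padding_idx + 1 and keep padded tokens at
# padding_idx. A minimal sketch of that behaviour, assuming the standard cumsum trick:
def _sketch_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx
# e.g. input_ids = [[5, 6, 1, 1]] with padding_idx=1 yields position ids [[2, 3, 1, 1]].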
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
class RobertaSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
# The query and value projections optionally use LoRA (low-rank adaptation) layers from
# model.lora; the key projection always stays a plain nn.Linear.
if config.lora:
self.query = lora.Linear(config.hidden_size, self.all_head_size, config.lora_r, lora_alpha=config.lora_alpha)
else:
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
if config.lora:
self.value = lora.Linear(config.hidden_size, self.all_head_size, config.lora_r, lora_alpha=config.lora_alpha)
else:
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the RobertaModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
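# The `lora.Linear` layers used for the query and value projections above add a trainable
# low-rank update on top of a (typically frozen) dense weight. The class below is only an
# illustrative sketch of that idea, not the actual `model.lora` implementation, whose exact
# options live elsewhere in this repository:
class _LoRALinearSketch(nn.Module):
    def __init__(self, in_features, out_features, r, lora_alpha=1):
        super().__init__()
        self.base = nn.Linear(in_features, out_features)   # pretrained projection, frozen
        self.base.weight.requires_grad_(False)
        self.lora_A = nn.Parameter(torch.randn(r, in_features) * 0.02)
        self.lora_B = nn.Parameter(torch.zeros(out_features, r))  # zero init -> no change at start
        self.scaling = lora_alpha / r

    def forward(self, x):
        return self.base(x) + (x @ self.lora_A.t() @ self.lora_B.t()) * self.scaling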
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class RobertaSelfOutput(BertSelfOutputAdaptersMixin, nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self._init_adapter_modules()
def forward(self, hidden_states, input_tensor, **kwargs):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.adapters_forward(hidden_states, input_tensor, **kwargs)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
class RobertaAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = RobertaSelfAttention(config)
self.output = RobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
**kwargs
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states, **kwargs)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class RobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class RobertaOutput(BertOutputAdaptersMixin, nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self._init_adapter_modules()
def forward(self, hidden_states, input_tensor, **kwargs):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.adapters_forward(hidden_states, input_tensor, **kwargs)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
class RobertaLayer(BertLayerAdaptersMixin, nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = RobertaAttention(config)
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
**kwargs
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
**kwargs,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, **kwargs
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output, **kwargs):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output, **kwargs)
return layer_output
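# `apply_chunking_to_forward` above splits the sequence dimension into chunks of
# `chunk_size_feed_forward`, applies the feed-forward to each chunk, and concatenates the
# results so that peak memory stays bounded. A rough, single-tensor sketch of that behaviour
# (the real helper also validates shapes and supports several input tensors):
def _sketch_chunked_feed_forward(forward_fn, chunk_size, seq_len_dim, attention_output):
    if chunk_size == 0:
        return forward_fn(attention_output)
    chunks = attention_output.split(chunk_size, dim=seq_len_dim)
    return torch.cat([forward_fn(chunk) for chunk in chunks], dim=seq_len_dim)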
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
class RobertaEncoder(BertEncoderAdaptersMixin, nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
self.drop_layer = -1
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
**kwargs
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
# rmlayers = [9,10,11]
past_key_value = past_key_values[i] if past_key_values is not None else None
# for j in range(i):
# past_key_value = past_key_value + past_key_values[j]
# if i > 0:
# past_key_value = past_key_value + past_key_values[i-1]
# if i in rmlayers:
# # print(attention_mask)
# attention_mask_new = attention_mask[:, :, :, self.config.pre_seq_len:]
# else:
# attention_mask_new = attention_mask[:]
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
**kwargs,
)
hidden_states = layer_outputs[0]
attention_mask = self.adjust_attention_mask_for_parallel(hidden_states, attention_mask)
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class RobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class RobertaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
supports_gradient_checkpointing = True
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, RobertaEncoder):
module.gradient_checkpointing = value
def update_keys_to_ignore(self, config, del_keys_to_ignore):
"""Remove some keys from ignore list"""
if not config.tie_word_embeddings:
# must make a new list, or the class variable gets modified!
self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
self._keys_to_ignore_on_load_missing = [
k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
]
ROBERTA_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads, etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class RobertaModel(BertModelAdaptersMixin, RobertaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
self._init_adapter_modules()
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
self.pre_transformer_forward(**kwargs)
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
embedding_output = self.invertible_adapters_forward(embedding_output)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
**kwargs,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
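# Sketch of what `get_extended_attention_mask` does for a 2D mask in the non-decoder case:
# broadcast to (batch, 1, 1, seq_len) and turn masked positions into a large negative additive
# bias before the softmax (the exact constant depends on the transformers version):
def _sketch_extended_attention_mask(attention_mask):
    extended = attention_mask[:, None, None, :].to(torch.float32)
    return (1.0 - extended) * -10000.0   # 0.0 where attended, -10000.0 where masked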
@add_start_docstrings(
"""Roberta Model transformer with the option to add multiple flexible heads on top.""",
ROBERTA_START_DOCSTRING,
)
class RobertaModelWithHeads(BertModelHeadsMixin, RobertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self._init_head_modules()
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="roberta-base",
output_type=ModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
adapter_names=None,
head=None,
**kwargs
):
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
adapter_names=adapter_names,
)
# BERT & RoBERTa return the pooled output as the second item; we don't need that in these heads
if not return_dict:
head_inputs = (outputs[0],) + outputs[2:]
else:
head_inputs = outputs
pooled_output = outputs[1]
if head or self.active_head:
head_outputs = self.forward_head(
head_inputs,
head_name=head,
attention_mask=attention_mask,
return_dict=return_dict,
pooled_output=pooled_output,
**kwargs,
)
return head_outputs
else:
# in case no head is used just return the output of the base model (including pooler output)
return outputs
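# A minimal sketch of the input_ids / attention_mask tensors the forward passes above expect,
# using made-up token ids and roberta-base's conventional special ids (<s>=0, </s>=2, <pad>=1);
# in practice these tensors come from RobertaTokenizer:
def _sketch_model_inputs():
    input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])   # one sequence, two padding tokens
    attention_mask = (input_ids != 1).long()                # 1 = attend, 0 = padding
    return {"input_ids": input_ids, "attention_mask": attention_mask}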
@add_start_docstrings(
"""RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. """, ROBERTA_START_DOCSTRING
)
class RobertaForCausalLM(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RobertaForCausalLM` as a standalone, add `is_decoder=True`.")
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
adapter_names=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
adapter_names=adapter_names,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(
sequence_output,
inv_lang_adapter=self.roberta.get_invertible_adapter(),
)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
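# The label handling in RobertaForCausalLM.forward, in isolation: logits at position t are
# scored against the token at position t + 1, so `labels` can simply be the input ids. A toy
# check of that alignment with a made-up vocabulary of 10 tokens:
def _sketch_causal_lm_shift():
    prediction_scores = torch.randn(1, 4, 10)      # logits for 4 positions
    labels = torch.tensor([[3, 1, 4, 1]])
    shifted_scores = prediction_scores[:, :-1, :]  # predictions for positions 0..2
    shifted_labels = labels[:, 1:]                 # targets are the tokens at positions 1..3
    return CrossEntropyLoss()(shifted_scores.reshape(-1, 10), shifted_labels.reshape(-1))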
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<mask>",
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
adapter_names=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
adapter_names=adapter_names,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(
sequence_output,
inv_lang_adapter=self.roberta.get_invertible_adapter(),
)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
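# Label construction for the masked-LM loss above, in isolation: positions that were not
# masked get -100 so CrossEntropyLoss ignores them. This is a simplified sketch (real BERT/
# RoBERTa masking also keeps some selected tokens unchanged or replaces them with random ones):
def _sketch_mlm_labels(input_ids, mask_token_id, mask_prob=0.15):
    labels = input_ids.clone()
    masked = torch.rand(input_ids.shape) < mask_prob
    labels[~masked] = -100                    # ignored by the loss
    corrupted = input_ids.clone()
    corrupted[masked] = mask_token_id
    return corrupted, labels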
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, inv_lang_adapter=None, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
if inv_lang_adapter:
x = inv_lang_adapter(x, rev=True)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
@add_start_docstrings(
"""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
adapter_names=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss);
if :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
adapter_names=adapter_names,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
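# The problem_type dispatch above, in isolation: regression for a single label, plain
# cross-entropy for integer labels, BCE-with-logits for float (multi-label) targets.
# Simplified sketch; the real code also caches problem_type on the config:
def _sketch_classification_loss(logits, labels, num_labels):
    if num_labels == 1:
        return MSELoss()(logits.squeeze(), labels.squeeze().float())
    if labels.dtype in (torch.long, torch.int):
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels.float())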
@add_start_docstrings(
"""
Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
adapter_names=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
adapter_names=adapter_names,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
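# Shape flow in the multiple-choice head above, in isolation: (batch, num_choices, seq_len)
# inputs are flattened to (batch * num_choices, seq_len) for the encoder, and the per-choice
# scores are folded back into (batch, num_choices) before the cross-entropy. Toy sizes only:
def _sketch_multiple_choice_shapes():
    batch, num_choices, seq_len, hidden = 2, 4, 8, 16
    input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))
    flat_input_ids = input_ids.view(-1, seq_len)                  # (8, 8)
    pooled_output = torch.randn(flat_input_ids.size(0), hidden)   # stand-in for the pooler output
    logits = nn.Linear(hidden, 1)(pooled_output)                  # (8, 1)
    return logits.view(-1, num_choices)                           # (2, 4)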
@add_start_docstrings(
"""
Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
adapter_names=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
adapter_names=adapter_names,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
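# The attention-mask handling in the token-classification loss above, in isolation: padded
# positions are rewritten to the loss function's ignore_index so they contribute nothing.
def _sketch_token_classification_loss(logits, labels, attention_mask, num_labels):
    loss_fct = CrossEntropyLoss()
    active = attention_mask.view(-1) == 1
    safe_labels = torch.where(
        active, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
    )
    return loss_fct(logits.view(-1, num_labels), safe_labels)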
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
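    # Note: this head applies the usual pooling pattern explicitly: take the hidden state of the
    # first (<s>) token, then dropout -> dense -> tanh -> dropout -> projection to
    # config.num_labels logits.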
@add_start_docstrings(
"""
    Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(ModelWithHeadsAdaptersMixin, RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
adapter_names=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the split may add an extra dimension; squeeze it if so
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
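        # Loss note: gold start/end positions are clamped above to `ignored_index` (the sequence
        # length), and CrossEntropyLoss is built with ignore_index=ignored_index, so annotations that
        # fall outside the model inputs are simply skipped. The final loss averages the start and end
        # terms.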
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
    Args:
        input_ids: torch.Tensor
        padding_idx: int
        past_key_values_length: int, optional
    Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
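# Worked example (illustrative values only), with padding_idx=1 and past_key_values_length=0:
#   input_ids           = [[7, 9, 1, 1]]
#   mask                = [[1, 1, 0, 0]]
#   cumsum(mask) * mask = [[1, 2, 0, 0]]
#   position ids        = [[2, 3, 1, 1]]   # non-padding tokens count up from padding_idx + 1,
#                                          # padding tokens keep position padding_idx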
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.Softmax",
"torch.einsum",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.tensor",
"torch.nn.BCEWithLogitsLoss",
"torch.tanh",
"torch.matmul",
"torch.nn.Embedding",
"torch.cumsum"
] | 1.8.1 | guanzhchen/PETuning | eb36327713e237ea95a8982ceabb71de5ca4b09d |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates.
from copy import deepcopy
import torch
from mmf.common.registry import registry
from mmf.models.base_model import BaseModel
from mmf.modules.encoders import MultiModalEncoderBase
from mmf.utils.build import build_classifier_layer
class UnimodalBase(MultiModalEncoderBase):
def __init__(self, config, *args, **kwargs):
super().__init__(config, *args, **kwargs)
def build(self):
encoders = self._build_encoders(self.config)
# Text Encoder mode
if "modal_encoder" not in self.config:
self.encoder = encoders[0]
# Modal encoder mode
elif "text_encoder" not in self.config:
self.encoder = encoders[1]
else:
raise RuntimeError(
"Unimodal Encoder can't have both text and modal encoder"
)
def forward(self, x, *args, **kwargs):
x = self.encoder(x, *args, **kwargs)
        # In the case of a BERT-style encoder, we only need the pooled output
if not torch.is_tensor(x) and len(x) == 2:
x = x[1]
x = torch.flatten(x, start_dim=1)
return x
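    # Note: BERT-style text encoders return a tuple (sequence_output, pooled_output); the tensor
    # check above picks the pooled output. Modal (e.g. image) encoders return a tensor directly.
    # In both cases the result is flattened to (batch_size, -1) before being returned.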
@registry.register_model("unimodal_text")
class UnimodalText(BaseModel):
def __init__(self, config, *args, **kwargs):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/unimodal/text.yaml"
def build(self):
self.base = UnimodalBase(self.config)
        # Since in_dim is calculated dynamically, we need to copy classifier_config
classifier_config = deepcopy(self.config.classifier)
classifier_config.params.in_dim = self.config.text_hidden_size
self.classifier = build_classifier_layer(classifier_config)
def forward(self, sample_list):
# BERT Based Encoders
args = []
if "input_ids" in sample_list:
text = sample_list.input_ids
args.append(sample_list.input_mask)
args.append(sample_list.segment_ids)
else:
text = sample_list.text
embedding = self.base(text, *args)
output = {}
output["scores"] = self.classifier(embedding)
return output
@registry.register_model("unimodal_image")
class UnimodalModal(BaseModel):
def __init__(self, config, *args, **kwargs):
super().__init__(config)
@classmethod
def config_path(cls):
return "configs/models/unimodal/image.yaml"
def build(self):
self.base = UnimodalBase(self.config)
self._is_direct_features_input = self.config.direct_features_input
num_features = self.config.modal_encoder.params.num_output_features
        # Since in_dim is calculated dynamically, we need to copy classifier_config
classifier_config = deepcopy(self.config.classifier)
classifier_config.params.in_dim = num_features * self.config.modal_hidden_size
self.classifier = build_classifier_layer(classifier_config)
def forward(self, sample_list):
        # Modal (image) inputs: either precomputed features or raw images
args = []
if self._is_direct_features_input:
modal = sample_list.image_feature_0
modal = torch.mean(modal, dim=1)
else:
modal = sample_list.image
embedding = self.base(modal, *args)
output = {}
output["scores"] = self.classifier(embedding)
return output
| [
"torch.is_tensor",
"torch.flatten",
"torch.mean"
] | 1.5.0 | anas-awadalla/mmf | 306f8f758831b2abf2c7ef5a8f010670a2cb33ed |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates.
import os
import tempfile
import unittest
import torch
from mmf.datasets.processors.processors import (
CaptionProcessor,
EvalAIAnswerProcessor,
MultiClassFromFile,
MultiHotAnswerFromVocabProcessor,
TransformerBboxProcessor,
)
from mmf.utils.configuration import load_yaml
from omegaconf import OmegaConf
from ..test_utils import compare_tensors
class TestDatasetProcessors(unittest.TestCase):
def _get_config(self, path):
path = os.path.join(os.path.abspath(__file__), path)
config = load_yaml(os.path.abspath(path))
return config
def test_caption_processor(self):
config = self._get_config("../../../mmf/configs/datasets/coco/defaults.yaml")
captioning_config = config.dataset_config.coco
caption_processor_config = captioning_config.processors.caption_processor
vocab_path = os.path.join(
os.path.abspath(__file__), "..", "..", "data", "vocab.txt"
)
caption_processor_config.params.vocab.type = "random"
caption_processor_config.params.vocab.vocab_file = os.path.abspath(vocab_path)
caption_processor = CaptionProcessor(caption_processor_config.params)
tokens = [1, 4, 5, 6, 4, 7, 8, 2, 0, 0, 0]
caption = caption_processor(tokens)
# Test start, stop, pad are removed
self.assertNotIn("<s>", caption["tokens"])
self.assertNotIn("</s>", caption["tokens"])
self.assertNotIn("<pad>", caption["tokens"])
# Test caption is correct
self.assertEqual(caption["caption"], "a man with a red helmet")
def test_multi_hot_answer_from_vocab_processor(self):
config = self._get_config("../../../mmf/configs/datasets/clevr/defaults.yaml")
clevr_config = config.dataset_config.clevr
answer_processor_config = clevr_config.processors.answer_processor
# Test num_answers==1 case
vocab_path = os.path.join(
os.path.abspath(__file__), "..", "..", "data", "vocab.txt"
)
answer_processor_config.params.vocab_file = os.path.abspath(vocab_path)
answer_processor = MultiHotAnswerFromVocabProcessor(
answer_processor_config.params
)
processed = answer_processor({"answers": ["helmet"]})
answers_indices = processed["answers_indices"]
answers_scores = processed["answers_scores"]
self.assertTrue(
compare_tensors(answers_indices, torch.tensor([5] * 10, dtype=torch.long))
)
expected_answers_scores = torch.zeros(19, dtype=torch.float)
expected_answers_scores[5] = 1.0
self.assertTrue(compare_tensors(answers_scores, expected_answers_scores))
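        # Reading the expectations above: the answer space has 19 entries (hence torch.zeros(19)),
        # answers_scores is a multi-hot vector with 1.0 at the vocab index of "helmet" (index 5),
        # and answers_indices repeats that single index to fill all num_answers slots (10 in this
        # configuration).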
# Test multihot when num answers greater than 1
answer_processor_config.params.vocab_file = os.path.abspath(vocab_path)
answer_processor_config.params.num_answers = 3
answer_processor = MultiHotAnswerFromVocabProcessor(
answer_processor_config.params
)
processed = answer_processor({"answers": ["man", "with", "countryside"]})
answers_indices = processed["answers_indices"]
answers_scores = processed["answers_scores"]
self.assertTrue(
compare_tensors(
answers_indices,
torch.tensor([2, 3, 15, 2, 3, 15, 2, 3, 15, 2], dtype=torch.long),
)
)
expected_answers_scores = torch.zeros(19, dtype=torch.float)
expected_answers_scores[2] = 1.0
expected_answers_scores[3] = 1.0
expected_answers_scores[15] = 1.0
self.assertTrue(compare_tensors(answers_scores, expected_answers_scores))
# Test unk
processed = answer_processor({"answers": ["test", "answer", "man"]})
answers_indices = processed["answers_indices"]
answers_scores = processed["answers_scores"]
self.assertTrue(
compare_tensors(
answers_indices,
torch.tensor([0, 0, 2, 0, 0, 2, 0, 0, 2, 0], dtype=torch.long),
)
)
expected_answers_scores = torch.zeros(19, dtype=torch.float)
expected_answers_scores[2] = 1.0
self.assertTrue(compare_tensors(answers_scores, expected_answers_scores))
def test_evalai_answer_processor(self):
evalai_answer_processor = EvalAIAnswerProcessor()
# Test number
processed = evalai_answer_processor("two")
expected = "2"
self.assertEqual(processed, expected)
# Test article
processed = evalai_answer_processor("a building")
expected = "building"
self.assertEqual(processed, expected)
# Test tokenize
processed = evalai_answer_processor("snow, mountain")
expected = "snow mountain"
self.assertEqual(processed, expected)
# Test contractions
processed = evalai_answer_processor("isnt")
expected = "isn't"
self.assertEqual(processed, expected)
# Test processor
processed = evalai_answer_processor("the two mountain's \t \n ")
expected = "2 mountain 's"
self.assertEqual(processed, expected)
def test_transformer_bbox_processor(self):
import numpy as np
config = {
"params": {
"bbox_key": "bbox",
"image_width_key": "image_width",
"image_height_key": "image_height",
}
}
bbox_processor = TransformerBboxProcessor(config)
item = {
"bbox": np.array([[100, 100, 100, 100]]),
"image_width": 100,
"image_height": 100,
}
processed_box = bbox_processor(item)["bbox"]
self.assertTrue(
torch.equal(
processed_box, torch.tensor([[1, 1, 1, 1, 0]], dtype=torch.float)
)
)
def test_multi_class_from_file(self):
f = tempfile.NamedTemporaryFile(mode="w", delete=False)
f.writelines("\n".join(["abc", "bcd", "def", "efg"]))
f.close()
config = OmegaConf.create({"vocab_file": f.name})
processor = MultiClassFromFile(config)
output = processor({"label": "abc"})
self.assertEqual(output["class_index"], 0)
output = processor({"label": "efg"})
self.assertEqual(output["class_index"], 3)
output = processor("def")
self.assertEqual(output["class_index"], 2)
self.assertRaises(AssertionError, processor, {"label": "UNK"})
os.unlink(f.name)
| [
"torch.zeros",
"torch.tensor"
] | 1.5.0 | anas-awadalla/mmf | 306f8f758831b2abf2c7ef5a8f010670a2cb33ed |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates.
import glob
import importlib
import logging
import os
import sys
import warnings
import torch
from mmf.common.registry import registry
from mmf.utils.configuration import get_mmf_env, load_yaml
from mmf.utils.distributed import is_master, synchronize
from mmf.utils.download import download_pretrained_model
from mmf.utils.file_io import PathManager
from mmf.utils.general import updir
from omegaconf import OmegaConf
try:
import git
except ImportError:
git = None
logger = logging.getLogger(__name__)
def _hack_imports():
# NOTE: This can probably be made universal to support backwards
# compatibility with name "pythia" if needed.
sys.modules["pythia"] = importlib.import_module("mmf")
sys.modules["pythia.utils.configuration"] = importlib.import_module(
"mmf.utils.configuration"
)
def load_pretrained_model(model_name_or_path, *args, **kwargs):
# If this is a file, then load this directly else download and load
if PathManager.exists(model_name_or_path):
download_path = model_name_or_path
model_name = model_name_or_path
else:
download_path = download_pretrained_model(model_name_or_path, *args, **kwargs)
model_name = model_name_or_path
configs = glob.glob(os.path.join(download_path, "*.yaml"))
assert len(configs) <= 1, (
"Multiple yaml files with the pretrained model. "
+ "MMF doesn't know what to do."
)
ckpts = []
allowed_ckpt_types = ("*.ckpt", "*.pth", "*.pt")
for ckpt_type in allowed_ckpt_types:
ckpts.extend(glob.glob(os.path.join(download_path, ckpt_type)))
assert (
len(ckpts) == 1
), "None or multiple checkpoints files. MMF doesn't know what to do."
_hack_imports()
with PathManager.open(ckpts[0], "rb") as f:
ckpt = torch.load(f, map_location=lambda storage, loc: storage)
    # If no config file was packaged with the model, check whether the checkpoint itself carries one
if len(configs) == 0:
assert "config" in ckpt, (
"No configs provided with pretrained model "
" while checkpoint also doesn't have configuration."
)
config = ckpt["config"]
else:
config = load_yaml(configs[0])
model_config = config.get("model_config", config)
ckpt = ckpt.get("model", ckpt)
# Also handle the case of model_name is path
model_config = model_config.get(model_name.split(os.path.sep)[-1].split(".")[0])
return {"config": model_config, "checkpoint": ckpt, "full_config": config}
class Checkpoint:
def __init__(self, trainer):
"""
Generates a path for saving model which can also be used for resuming
from a checkpoint.
"""
self.trainer = trainer
self.config = self.trainer.config
self.save_dir = get_mmf_env(key="save_dir")
self.model_name = self.config.model
self.ckpt_foldername = self.save_dir
self.device = registry.get("current_device")
self.ckpt_prefix = ""
if hasattr(self.trainer.model, "get_ckpt_name"):
self.ckpt_prefix = self.trainer.model.get_ckpt_name() + "_"
self.pth_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + self.model_name + "_final.pth"
)
self.models_foldername = os.path.join(self.ckpt_foldername, "models")
if not PathManager.exists(self.models_foldername):
PathManager.mkdirs(self.models_foldername)
self.save_config()
self.repo_path = updir(os.path.abspath(__file__), n=3)
self.git_repo = None
if git and self.config.checkpoint.save_git_details:
try:
self.git_repo = git.Repo(self.repo_path)
except git.exc.InvalidGitRepositoryError:
# Not a git repo, don't do anything
pass
self.max_to_keep = self.config.checkpoint.max_to_keep
self.saved_iterations = []
def save_config(self):
cfg_file = os.path.join(self.ckpt_foldername, "config.yaml")
with PathManager.open(cfg_file, "w") as f:
f.write(self.config.pretty(resolve=True))
def load_state_dict(self):
ckpt_config = self.config.checkpoint
suffix = "best.ckpt" if ckpt_config.resume_best else "current.ckpt"
reverse_suffix = "best.ckpt" if not ckpt_config.resume_best else "current.ckpt"
ckpt_filepath = os.path.join(self.ckpt_foldername, self.ckpt_prefix + suffix)
# In case of interrupts and resume, ckpt_config.resume_file would be there
# But, if the checkpoints are already created in the save dir
# and resume is true signifying the interrupt resume, we should skip
# loading the resume file.
if (
ckpt_config.resume_file is not None or ckpt_config.resume_zoo is not None
) and (not ckpt_config.resume or not PathManager.exists(ckpt_filepath)):
if ckpt_config.resume_file and PathManager.exists(ckpt_config.resume_file):
self._load(
ckpt_config.resume_file,
load_pretrained=ckpt_config.resume_pretrained,
)
return
# resume_file doesn't exist, try from zoo now
elif ckpt_config.resume_zoo is not None:
self._load(
ckpt_config.resume_zoo,
load_zoo=True,
load_pretrained=ckpt_config.resume_pretrained,
)
return
else:
raise RuntimeError(f"{ckpt_config.resume_file} doesn't exist")
if ckpt_config.resume:
if PathManager.exists(ckpt_filepath):
self._load(ckpt_filepath)
else:
warnings.warn(
"Tried to resume but checkpoint filepath {} "
"is not present. Trying {}, otherwise skipping.".format(
ckpt_filepath, reverse_suffix
)
)
ckpt_filepath = ckpt_filepath.replace(suffix, reverse_suffix)
if PathManager.exists(ckpt_filepath):
self._load(ckpt_filepath)
def _load(self, file, force=False, load_zoo=False, load_pretrained=False):
ckpt_config = self.config.checkpoint
logger.info("Loading checkpoint")
if load_zoo:
ckpt, should_continue = self._load_from_zoo(file)
if not should_continue:
return
else:
ckpt = self._torch_load(file)
if "model" in ckpt:
ckpt_model = ckpt["model"]
else:
ckpt_model = ckpt
ckpt = {"model": ckpt}
pretrained_state_mapping = ckpt_config.pretrained_state_mapping
if not load_pretrained or force is True:
pretrained_state_mapping = {}
new_dict = {}
new_dict = self.upgrade_state_dict(ckpt_model)
if len(pretrained_state_mapping.items()) == 0:
final_dict = new_dict
self.trainer.model.load_state_dict(final_dict, strict=False)
reset_optimizer = ckpt_config.reset.optimizer or ckpt_config.reset.all
if not reset_optimizer:
self._load_optimizer(ckpt)
self.trainer.early_stop_callback.early_stopping.init_from_checkpoint(ckpt)
reset_counts = ckpt_config.reset.all or ckpt_config.reset.counts
if not reset_counts:
self._load_counts_and_lr_scheduler(ckpt)
else:
self._load_pretrained(new_dict)
logger.info("Checkpoint loaded.")
logger.info(f"Current num updates: {self.trainer.num_updates}")
logger.info(f"Current iteration: {self.trainer.current_iteration}")
logger.info(f"Current epoch: {self.trainer.current_epoch}")
def _load_optimizer(self, ckpt):
if "optimizer" in ckpt:
try:
self.trainer.optimizer.load_state_dict(ckpt["optimizer"])
except ValueError:
logger.info(
"Optimizer failed to load. Try with "
+ "checkpoint.reset.optimizer=True"
)
raise
else:
warnings.warn(
"'optimizer' key is not present in the "
"checkpoint asked to be loaded. Skipping."
)
def _load_counts_and_lr_scheduler(self, ckpt):
ckpt_config = self.trainer.config.checkpoint
if "best_update" in ckpt:
if ckpt_config.resume_best:
self.trainer.num_updates = ckpt.get(
"best_update", self.trainer.num_updates
)
self.trainer.current_iteration = ckpt.get(
"best_iteration", self.trainer.current_iteration
)
else:
self.trainer.num_updates = ckpt.get(
"num_updates", self.trainer.num_updates
)
self.trainer.current_iteration = ckpt.get(
"current_iteration", self.trainer.current_iteration
)
self.trainer.current_epoch = ckpt.get(
"current_epoch", self.trainer.current_epoch
)
elif "best_iteration" in ckpt:
# Preserve old behavior for old checkpoints where we always
# load best iteration
if ckpt_config.resume_best and "current_iteration" in ckpt:
self.trainer.current_iteration = ckpt["current_iteration"]
else:
self.trainer.current_iteration = ckpt.get(
"best_iteration", self.trainer.current_iteration
)
self.trainer.num_updates = self.trainer.current_iteration
lr_scheduler = self.trainer.lr_scheduler_callback._scheduler
if lr_scheduler is not None:
if "lr_scheduler" in ckpt:
lr_scheduler.load_state_dict(ckpt["lr_scheduler"])
else:
warnings.warn(
"'lr_scheduler' key is not present in the "
"checkpoint asked to be loaded. Setting lr_scheduler's "
"last_epoch to current_iteration."
)
lr_scheduler.last_epoch = self.trainer.current_iteration
registry.register("current_iteration", self.trainer.current_iteration)
registry.register("num_updates", self.trainer.num_updates)
self.trainer.current_epoch = ckpt.get("best_epoch", self.trainer.current_epoch)
registry.register("current_epoch", self.trainer.current_epoch)
def _load_pretrained(self, ckpt):
model = self.trainer.model
own_state = model.state_dict()
mapping = self.trainer.config.checkpoint.pretrained_state_mapping
for key, value in mapping.items():
key += "."
value += "."
for attr in ckpt:
for own_attr in own_state:
if hasattr(model, "format_state_key"):
formatted_attr = model.format_state_key(attr)
else:
formatted_attr = attr
if (
key in own_attr
and value in formatted_attr
and own_attr.replace(key, "")
== formatted_attr.replace(value, "")
):
logger.info("Copying " + own_attr + " from " + attr)
own_state[own_attr].copy_(ckpt[attr])
logger.info("Pretrained model loaded")
def upgrade_state_dict(self, state_dict):
data_parallel = registry.get("data_parallel") or registry.get("distributed")
data_parallel = data_parallel or isinstance(
self.trainer.model,
(torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel),
)
new_dict = {}
for attr in state_dict:
new_attr = attr
if not data_parallel and attr.startswith("module."):
                # In case the ckpt was actually a DataParallel model,
                # strip the first "module." prefix added by DataParallel
new_dict[new_attr.replace("module.", "", 1)] = state_dict[attr]
elif data_parallel and not attr.startswith("module."):
new_dict["module." + new_attr] = state_dict[attr]
else:
new_dict[new_attr] = state_dict[attr]
return new_dict
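    # Illustrative mapping performed above (hypothetical key names):
    #   checkpoint saved from DataParallel, loading into a plain model:
    #       "module.encoder.layer.0.weight" -> "encoder.layer.0.weight"
    #   checkpoint saved from a plain model, loading into DataParallel/DistributedDataParallel:
    #       "encoder.layer.0.weight"        -> "module.encoder.layer.0.weight"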
def _load_from_zoo(self, file):
ckpt_config = self.trainer.config.checkpoint
zoo_ckpt = load_pretrained_model(file)
# If zoo_config_override, load the model directly using `from_pretrained`
if ckpt_config.zoo_config_override:
model_cls = registry.get_model_class(self.trainer.config.model)
self.trainer.model = model_cls.from_pretrained(ckpt_config.resume_zoo)
self.trainer.config.model_config = zoo_ckpt["full_config"].model_config
return None, False
else:
return self.upgrade_state_dict(zoo_ckpt["checkpoint"]), True
def _torch_load(self, file):
# Backwards compatibility to Pythia
_hack_imports()
with PathManager.open(file, "rb") as f:
if "cuda" in str(self.device):
return torch.load(f, map_location=self.device)
else:
return torch.load(f, map_location=lambda storage, loc: storage)
def _get_vcs_fields(self):
"""Returns a dict with git fields of the current repository
To reproduce an experiment directly from a checkpoint
1) Export `config` key as a yaml
2) Clone repository and checkout at given commit on given branch
3) Any local change (diff) while running the experiment is stored
in the value with key `git/diff`, output the diff to a `path.diff`
file and apply the patch to the current state by simply
`patch -p0 < path.diff`
"""
return {
"git/branch": self.git_repo.active_branch.name,
"git/commit_hash": self.git_repo.head.commit.name_rev,
"git/commit_author": self.git_repo.head.commit.author.name,
"git/commit_message": self.git_repo.head.commit.message,
"git/diff": self.git_repo.git.diff("--no-prefix"),
}
def save(self, update, iteration=None, update_best=False):
# Only save in main process
if not is_master():
return
if not iteration:
iteration = update
ckpt_filepath = os.path.join(self.models_foldername, "model_%d.ckpt" % update)
best_ckpt_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + "best.ckpt"
)
current_ckpt_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + "current.ckpt"
)
best_iteration = (
self.trainer.early_stop_callback.early_stopping.best_monitored_iteration
)
best_update = (
self.trainer.early_stop_callback.early_stopping.best_monitored_update
)
best_metric = (
self.trainer.early_stop_callback.early_stopping.best_monitored_value
)
model = self.trainer.model
data_parallel = registry.get("data_parallel") or registry.get("distributed")
if data_parallel is True:
model = model.module
ckpt = {
"model": model.state_dict(),
"optimizer": self.trainer.optimizer.state_dict(),
"best_iteration": best_iteration,
"current_iteration": iteration,
"current_epoch": self.trainer.current_epoch,
"num_updates": update,
"best_update": best_update,
"best_metric_value": best_metric,
# Convert to container to avoid any dependencies
"config": OmegaConf.to_container(self.config, resolve=True),
}
lr_scheduler = self.trainer.lr_scheduler_callback._scheduler
if lr_scheduler is not None:
ckpt["lr_scheduler"] = lr_scheduler.state_dict()
if self.git_repo:
git_metadata_dict = self._get_vcs_fields()
ckpt.update(git_metadata_dict)
with PathManager.open(ckpt_filepath, "wb") as f:
torch.save(ckpt, f)
if update_best:
with PathManager.open(best_ckpt_filepath, "wb") as f:
torch.save(ckpt, f)
# Save current always
with PathManager.open(current_ckpt_filepath, "wb") as f:
torch.save(ckpt, f)
# Remove old checkpoints if max_to_keep is set
if self.max_to_keep > 0:
if len(self.saved_iterations) == self.max_to_keep:
self.remove(self.saved_iterations.pop(0))
self.saved_iterations.append(update)
def remove(self, update):
ckpt_filepath = os.path.join(self.models_foldername, "model_%d.ckpt" % update)
if PathManager.isfile(ckpt_filepath):
PathManager.rm(ckpt_filepath)
def restore(self):
synchronize()
logger.info("Restoring checkpoint")
best_path = os.path.join(self.ckpt_foldername, self.ckpt_prefix + "best.ckpt")
if PathManager.exists(best_path):
self._load(best_path, force=True)
def finalize(self):
if is_master():
with PathManager.open(self.pth_filepath, "wb") as f:
torch.save(self.trainer.model.state_dict(), f)
| [
"torch.save",
"torch.load"
] | 1.5.0 | anas-awadalla/mmf | 306f8f758831b2abf2c7ef5a8f010670a2cb33ed |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates.
import torch
from mmf.common.registry import registry
from torch import nn
from torch.nn.utils.weight_norm import weight_norm
class VisDialDiscriminator(nn.Module):
def __init__(self, config, embedding):
super().__init__()
self.config = config
self.embedding = embedding
self.emb_out_dim = embedding.text_out_dim
self.hidden_dim = self.config.hidden_dim
self.projection_layer = nn.Linear(self.emb_out_dim, self.hidden_dim)
def forward(self, encoder_output, batch):
answer_options_len = batch["answer_options_len"]
# BATCH_SIZE X DIALOGUES X 100 X SEQ_LEN
answer_options = batch["answer_options"]
max_seq_len = answer_options.size(-1)
batch_size, ndialogues, noptions, seq_len = answer_options.size()
# (B X D X 100) X SEQ_LEN
answer_options = answer_options.view(-1, max_seq_len)
answer_options_len = answer_options_len.view(-1)
# (B x D x 100) x EMB_OUT_DIM
answer_options = self.embedding(answer_options)
# (B x D x 100) x HIDDEN_DIM
answer_options = self.projection_layer(answer_options)
# (B x D) x 100 x HIDDEN_DIM
answer_options = answer_options.view(
batch_size * ndialogues, noptions, self.hidden_dim
)
# (B x D) x HIDDEN_DIM => (B x D) x 100 x HIDDEN_DIM
encoder_output = encoder_output.unsqueeze(1).expand(-1, noptions, -1)
# (B x D) x 100 x HIDDEN_DIM * (B x D) x 100 x HIDDEN_DIM = SAME THING
# SUM => (B x D) x 100
scores = torch.sum(answer_options * encoder_output, dim=2)
return scores
class LanguageDecoder(nn.Module):
def __init__(self, in_dim, out_dim, **kwargs):
super().__init__()
self.language_lstm = nn.LSTMCell(
in_dim + kwargs["hidden_dim"], kwargs["hidden_dim"], bias=True
)
self.fc = weight_norm(nn.Linear(kwargs["hidden_dim"], out_dim))
self.dropout = nn.Dropout(p=kwargs["dropout"])
self.init_weights(kwargs["fc_bias_init"])
def init_weights(self, fc_bias_init):
self.fc.bias.data.fill_(fc_bias_init)
self.fc.weight.data.uniform_(-0.1, 0.1)
def forward(self, weighted_attn):
# Get LSTM state
state = registry.get(f"{weighted_attn.device}_lstm_state")
h1, c1 = state["td_hidden"]
h2, c2 = state["lm_hidden"]
# Language LSTM
h2, c2 = self.language_lstm(torch.cat([weighted_attn, h1], dim=1), (h2, c2))
predictions = self.fc(self.dropout(h2))
# Update hidden state for t+1
state["lm_hidden"] = (h2, c2)
return predictions
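# Note: the decoder expects a two-LSTM state dict registered under "<device>_lstm_state" with keys
# "td_hidden" (hidden state of a preceding LSTM, presumably the top-down attention LSTM) and
# "lm_hidden" (this language LSTM's state). Each forward step consumes the attended features
# concatenated with h1 and writes the updated "lm_hidden" back into the registry.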
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.LSTMCell",
"torch.sum"
] | 1.5.0 | anas-awadalla/mmf | 306f8f758831b2abf2c7ef5a8f010670a2cb33ed |
1.0 | from abc import abstractmethod
from pandas import DataFrame
from os.path import exists
from numpy import argmax
from torch.nn import Module, Conv2d
from torch.nn import functional as F
from torch import load as loadModel
from cnn.MixedFilter import MixedConvBNWithReLU as MixedConvWithReLU
from cnn.uniq_loss import UniqLoss
import cnn.statistics
from cnn.HtmlLogger import HtmlLogger
from UNIQ.quantize import check_quantization
# from torch import save as saveModel
# from torch import ones, zeros, no_grad, cat, tensor
# from torch.nn import CrossEntropyLoss
# preForward hook for training weights phase.
# When we train weights, we need to quantize staged layers before the forward pass and remove the
# quantization afterwards, so that the underlying weights can still be updated by gradients.
# The same holds for noise: add it before the forward pass and remove it afterwards.
def preForward(self, input):
deviceID = input[0].device.index
assert (deviceID not in self.hookDevices)
self.hookDevices.append(deviceID)
assert (self.training is True)
# update layers list to new DataParallel layers copies
self.layersList = self.buildLayersList()
# quantize staged layers
self.restoreQuantizationForStagedLayers()
# add noise to next to be staged layer
if self.nLayersQuantCompleted < self.nLayers():
layer = self.layersList[self.nLayersQuantCompleted]
assert (layer.added_noise is True)
for op in layer.opsList():
assert (op.noise is True)
op.add_noise()
def postForward(self, input, __):
deviceID = input[0].device.index
assert (deviceID in self.hookDevices)
self.hookDevices.remove(deviceID)
assert (self.training is True)
# remove quantization from staged layers
self.removeQuantizationFromStagedLayers()
# remove noise from next to be staged layer
if self.nLayersQuantCompleted < self.nLayers():
layer = self.layersList[self.nLayersQuantCompleted]
assert (layer.added_noise is True)
for op in layer.opsList():
assert (op.noise is True)
op.restore_state()
class BaseNet(Module):
# init bitwidth of input to model
modelInputBitwidth = 8
modelInputnFeatureMaps = 3
# counts the entire model bops in discrete mode
def countBopsDiscrete(self):
totalBops = 0
# input_bitwidth is a list of bitwidth per feature map
input_bitwidth = [self.modelInputBitwidth] * self.modelInputnFeatureMaps
for layer in self.layers:
totalBops += layer.getBops(input_bitwidth)
input_bitwidth = layer.getCurrentOutputBitwidth()
totalBops /= 1E9
return totalBops
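    # Note: BOPs are accumulated layer by layer; each layer's count depends on the bitwidths of its
    # incoming feature maps, and the layer then reports the output bitwidths fed to the next layer.
    # The total is returned in GBOPs (divided by 1e9).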
def countBops(self):
# wrapper is needed because countBopsFuncs is defined outside __init__()
return self.countBopsFunc(self)
countBopsFuncs = dict(discrete=countBopsDiscrete)
alphasCsvFileName = 'alphas.csv'
def buildLayersList(self):
layersList = []
for layer in self.layers:
layersList.extend(layer.getLayers())
return layersList
def __init__(self, args, initLayersParams):
super(BaseNet, self).__init__()
# init save folder
saveFolder = args.save
# init layers
self.layers = self.initLayers(initLayersParams)
# build mixture layers list
self.layersList = self.buildLayersList()
# set bops counter function
self.countBopsFunc = self.countBopsFuncs[args.bopsCounter]
# init statistics
self.stats = cnn.statistics.Statistics(self.layersList, saveFolder)
# collect learnable params (weights)
self.learnable_params = [param for param in self.parameters() if param.requires_grad]
# init learnable alphas
self.learnable_alphas = self.getLearnableAlphas()
# init number of layers we have completed its quantization
self.nLayersQuantCompleted = 0
# calc init baseline bops
baselineBops = self.calcBaselineBops()
args.baselineBops = baselineBops[args.baselineBits[0]]
# plot baselines bops
self.stats.addBaselineBopsData(args, baselineBops)
# init criterion
self._criterion = UniqLoss(args)
self._criterion = self._criterion.cuda()
# init hooks handlers list
self.hooksList = []
# set hook flag, to make sure hook happens
# # turn it on on pre-forward hook, turn it off on post-forward hook
# self.hookFlag = False
self.hookDevices = []
self.printToFile(saveFolder)
# init layers permutation list
self.layersPerm = []
# init number of permutations counter
self.nPerms = 1
for layer in self.layersList:
# add layer numOps range to permutation list
self.layersPerm.append(list(range(len(layer.alphas))))
self.nPerms *= len(layer.alphas)
# init alphas DataFrame
self.alphas_df = None
self.__initAlphasDataFrame(saveFolder)
@abstractmethod
def initLayers(self, params):
raise NotImplementedError('subclasses must override initLayers()!')
@abstractmethod
def forward(self, x):
raise NotImplementedError('subclasses must override forward()!')
@abstractmethod
def switch_stage(self, logger=None):
raise NotImplementedError('subclasses must override switch_stage()!')
@abstractmethod
def loadUNIQPreTrained(self, checkpoint):
raise NotImplementedError('subclasses must override loadUNIQPreTrained()!')
@abstractmethod
def loadSingleOpPreTrained(self, checkpoint):
raise NotImplementedError('subclasses must override loadSingleOpPreTrained()!')
@abstractmethod
def turnOnWeights(self):
raise NotImplementedError('subclasses must override turnOnWeights()!')
def nLayers(self):
return len(self.layersList)
def getLearnableAlphas(self):
return [layer.alphas for layer in self.layersList if layer.alphas.requires_grad is True]
def updateLearnableAlphas(self):
self.learnable_alphas = self.getLearnableAlphas()
def arch_parameters(self):
return self.learnable_alphas
    # layer_basis is a function of filter quantization,
    # therefore we have to update its value based on weight_max_int, which is a function of the weights bitwidth
def __updateStatistics(self, loggerFuncs=[]):
for layer in self.layersList:
for op in layer.opsList():
conv = op.getModule(Conv2d)
# update layer_basis value based on weights bitwidth
conv.layer_basis = conv.initial_clamp_value / op.quantize.weight_max_int
for f in loggerFuncs:
f('Updated layer_basis according to bitwidth (weight_max_int)')
def loadPreTrained(self, path, logger, gpu):
# init bool flag whether we loaded ops in the same layer with equal or different weights
loadOpsWithDifferentWeights = False
loggerRows = []
loadSuccess = None
if path is not None:
if exists(path):
# load checkpoint
checkpoint = loadModel(path, map_location=lambda storage, loc: storage.cuda(gpu))
assert (checkpoint['updated_statistics'] is True)
chckpntStateDict = checkpoint['state_dict']
# load model state dict keys
modelStateDictKeys = set(self.state_dict().keys())
# compare dictionaries
dictDiff = modelStateDictKeys.symmetric_difference(set(chckpntStateDict.keys()))
# update flag value
loadOpsWithDifferentWeights = len(dictDiff) == 0
# decide how to load checkpoint state dict
if loadOpsWithDifferentWeights:
# load directly, keys are the same
self.load_state_dict(chckpntStateDict)
else:
# use some function to map keys
loadFuncs = [self.loadUNIQPreTrained, self.loadSingleOpPreTrained]
for func in loadFuncs:
loadSuccess = func(chckpntStateDict)
if loadSuccess is not False:
# update statistics if we don't load ops with different statistics
self.__updateStatistics(loggerFuncs=[lambda msg: loggerRows.append(['Statistics update', msg])])
break
if loadSuccess is not False:
# add info rows about checkpoint
loggerRows.append(['Path', '{}'.format(path)])
loggerRows.append(['Validation accuracy', '{:.5f}'.format(checkpoint['best_prec1'])])
loggerRows.append(['checkpoint[updated_statistics]', checkpoint['updated_statistics']])
# check if model includes stats
modelIncludesStats = False
for key in chckpntStateDict.keys():
if key.endswith('.layer_basis'):
modelIncludesStats = True
break
loggerRows.append(['Includes stats', '{}'.format(modelIncludesStats)])
# # =============================== Load alphas & plots ==========================================================
# self.load_alphas_state(checkpoint['alphas'])
# loggerRows.append(['Loaded alphas distribution', 'True'])
# p = '/home/yochaiz/F-BANNAS/cnn/results/[(2, 2), (2, 4), (3, 3), (8, 8)],[1.0],[cifar10],[20181113-212929]/plots.data'
# stats = self.stats
# stats.plotsData = loadModel(p)
# stats.batchLabels = stats.plotsData['alphas_entropy over epochs']['x']
#
# stats.containers[stats.lossAvgKey][0] = stats.plotsData['loss_avg over epochs']['data'][0]['y']
# stats.containers[stats.crossEntropyLossAvgKey][0] = stats.plotsData['cross_entropy_loss_avg over epochs']['data'][0]['y']
# stats.containers[stats.bopsLossAvgKey][0] = stats.plotsData['bops_loss_avg over epochs']['data'][0]['y']
# stats.containers[stats.lossVarianceKey][0] = stats.plotsData['loss_variance over epochs']['data'][0]['y']
#
# for i in range(len(stats.containers[stats.entropyKey])):
# stats.containers[stats.entropyKey][i] = stats.plotsData['alphas_entropy over epochs']['data'][i]['y']
#
# for i in range(len(stats.containers[stats.alphaDistributionKey])):
# key = 'alphas_distribution --layer:[{}]-- over epochs'.format(i)
# for j in range(len(stats.containers[stats.alphaDistributionKey][i])):
# stats.containers[stats.alphaDistributionKey][i][j] = stats.plotsData[key]['data'][j]['y']
#
# loggerRows.append(['Loaded plots data', p])
# # =============================================================================================================
else:
loggerRows.append(['Path', 'Failed to load pre-trained from [{}], state_dict does not fit'.format(path)])
else:
loggerRows.append(['Path', 'Failed to load pre-trained from [{}], path does not exists'.format(path)])
# load pre-trained model if we tried to load pre-trained
logger.addInfoTable('Pre-trained model', loggerRows)
return loadOpsWithDifferentWeights
    # Load weights for each filter from its uniform model, i.e. load 2-bit filter weights from the uniform 2-bit model.
    # Weights in uniform models are full-precision, i.e. before quantization.
def loadUniformPreTrained(self, args, logger):
from collections import OrderedDict
from cnn.MixedFilter import QuantizedOp
from cnn.utils import loadCheckpoint
from torch.nn.modules import Linear, BatchNorm2d
def b(op, prefix):
keysList = []
for name, param in op._parameters.items():
if param is not None:
keysList.append(prefix + name)
for name, buf in op._buffers.items():
if buf is not None:
keysList.append(prefix + name)
for name, module in op._modules.items():
if module is not None:
keysList.extend(b(module, prefix + name + '.'))
return keysList
def a(model, dict, prefix=''):
for name, module in model._modules.items():
key = None
if isinstance(module, QuantizedOp):
key = module.getBitwidth()
# elif isinstance(module, BatchNorm2d) or isinstance(module, Linear):
elif isinstance(module, Linear):
key = (32, 32)
if key is not None:
if key not in dict.keys():
dict[key] = []
dict[key].extend(b(module, prefix + name + '.'))
else:
a(module, dict, prefix + name + '.')
modelDict = OrderedDict()
a(self, modelDict)
# transform downsamples keys
transformMap = [[(2, None), (2, 2)], [(3, None), (3, 3)], [(4, None), (4, 4)], [(8, None), (8, 8)]]
for srcBitwidth, dstBitwidth in transformMap:
if srcBitwidth in modelDict.keys():
modelDict[dstBitwidth].extend(modelDict[srcBitwidth])
del modelDict[srcBitwidth]
keysList = []
for bitwidth, bitwidthKeysList in modelDict.items():
keysList.extend(bitwidthKeysList)
modelStateDictKeys = set(self.state_dict().keys())
dictDiff = modelStateDictKeys.symmetric_difference(set(keysList))
assert (len(dictDiff) == 0)
stateDict = OrderedDict()
token1 = '.ops.'
token2 = '.op.'
for bitwidth, bitwidthKeysList in modelDict.items():
if bitwidth == (32, 32):
continue
checkpoint, _ = loadCheckpoint(args.dataset, args.model, bitwidth)
assert (checkpoint is not None)
chckpntStateDict = checkpoint['state_dict']
for key in bitwidthKeysList:
prefix = key[:key.index(token1)]
suffix = key[key.rindex(token2):]
# convert model key to checkpoint key
chckpntKey = prefix + token1 + '0.0' + suffix
# add value to dict
stateDict[key] = chckpntStateDict[chckpntKey]
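                # Illustrative key rewrite above (hypothetical key name): a model key such as
                #   "layers.0.filters.3.ops.2.1.op.conv.weight"
                # is looked up in the uniform checkpoint as
                #   "layers.0.filters.3.ops.0.0.op.conv.weight"
                # i.e. everything between ".ops." and the last ".op." is replaced with "0.0",
                # presumably because each uniform model stores a single op per filter.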
# # load keys from (32, 32) checkpoint, no need to transform keys
bitwidth = (8, 8)
checkpoint, _ = loadCheckpoint(args.dataset, args.model, bitwidth) # , filename='model.updated_stats.pth.tar')
# checkpoint = loadModel("/home/vista/Desktop/Architecture_Search/ZZ/cifar100/resnet_[2#2,4#3#4#8]/pre_trained_checkpoint.pth.tar")
assert (checkpoint is not None)
chckpntStateDict = checkpoint['state_dict']
# map = self.buildStateDictMap(chckpntStateDict)
# invMap = {v: k for k, v in map.items()}
bitwidth = (32, 32)
for key in modelDict[bitwidth]:
stateDict[key] = chckpntStateDict[key]
# prefix = key[:key.rindex('.')]
# suffix = key[key.rindex('.'):]
# newKey = invMap[prefix]
# stateDict[key] = chckpntStateDict[newKey + suffix]
dictDiff = modelStateDictKeys.symmetric_difference(set(stateDict.keys()))
assert (len(dictDiff) == 0)
self.load_state_dict(stateDict)
logger.addInfoTable('Pre-trained model', [['Loaded each filter with filter from the corresponding bitwidth uniform model']])
def loss(self, logits, target):
return self._criterion(logits, target, self.countBops())
def turnOffAlphas(self):
for layer in self.layersList:
layer.alphas.grad = None
def calcBopsRatio(self):
return self._criterion.calcBopsRatio(self.countBops())
def choosePathByAlphas(self, loggerFuncs=[]):
for l in self.layers:
l.choosePathByAlphas()
logMsg = 'Model layers filters partition has been updated by alphas distribution'
for f in loggerFuncs:
f(logMsg)
# set curr_alpha_idx to each filter by alphas values
def setFiltersByAlphas(self, loggerFuncs=[]):
for layer in self.layersList:
layer.setFiltersPartitionByAlphas()
logMsg = 'Model layers filters partition has been updated by alphas values'
for f in loggerFuncs:
f(logMsg)
# returns list of layers filters partition
def getCurrentFiltersPartition(self):
return [layer.getCurrentFiltersPartition() for layer in self.layersList]
# partition is list of int tensors
# given a partition, set model filters accordingly
def setFiltersByPartition(self, partition, loggerFuncs=[]):
for layer, p in zip(self.layersList, partition):
layer.setFiltersPartition(p)
logMsg = 'Model layers filters partition has been updated by given partition'
for f in loggerFuncs:
f(logMsg)
def isQuantized(self):
for layerIdx, layer in enumerate(self.layersList):
assert (layer.quantized is True)
assert (layer.added_noise is False)
for opIdx, op in enumerate(layer.opsList()):
assert (check_quantization(op.getModule(Conv2d).weight) <= (2 ** op.bitwidth[0]))
return True
def setWeightsTrainingHooks(self):
assert (len(self.hooksList) == 0)
# assign pre & post forward hooks
self.hooksList = [self.register_forward_pre_hook(preForward), self.register_forward_hook(postForward)]
def removeWeightsTrainingHooks(self):
for handler in self.hooksList:
handler.remove()
# clear hooks handlers list
self.hooksList.clear()
# remove quantization from staged layers before training weights
# quantization will be set through pre-forward hook
# we keep ActQaunt.qunatize_during_training == True
def removeQuantizationFromStagedLayers(self):
for layerIdx in range(self.nLayersQuantCompleted):
layer = self.layersList[layerIdx]
assert (layer.quantized is True)
# remove quantization from layer ops
for op in layer.opsList():
op.restore_state()
# restore quantization for staged layers after training weights
# quantization will be set through pre-forward hook
# we keep ActQaunt.qunatize_during_training == True
def restoreQuantizationForStagedLayers(self):
for layerIdx in range(self.nLayersQuantCompleted):
layer = self.layersList[layerIdx]
assert (layer.quantized is True)
# refresh layer ops list. we want ops list to contain the ops DataParallel GPU copies
# quantize layer ops
for op in layer.opsList():
op.quantizeFunc()
assert (check_quantization(op.getModule(Conv2d).weight) <= (2 ** op.bitwidth[0]))
def quantizeUnstagedLayers(self):
        # Quantize model layers that haven't switched stage yet.
        # No need to turn gradients off, since this runs under a `with no_grad()` block.
if self.nLayersQuantCompleted < self.nLayers():
# turn off noise if 1st unstaged layer
layer = self.layersList[self.nLayersQuantCompleted]
layer.turnOffNoise(self.nLayersQuantCompleted)
# quantize all unstaged layers
for layerIdx, layer in enumerate(self.layersList[self.nLayersQuantCompleted:]):
# quantize
layer.quantize(self.nLayersQuantCompleted + layerIdx)
assert (self.isQuantized() is True)
def unQuantizeUnstagedLayers(self):
# restore weights (remove quantization) of model layers that haven't switched stage yet
if self.nLayersQuantCompleted < self.nLayers():
for layerIdx, layer in enumerate(self.layersList[self.nLayersQuantCompleted:]):
# remove quantization
layer.unQuantize(self.nLayersQuantCompleted + layerIdx)
# add noise back to 1st unstaged layer
layer = self.layersList[self.nLayersQuantCompleted]
layer.turnOnNoise(self.nLayersQuantCompleted)
def resetForwardCounters(self):
for layer in self.layersList:
for filter in layer.filters:
# reset filter counters
filter.resetOpsForwardCounters()
    # Apply some function on baseline models.
    # There is one baseline model per filter bitwidth.
    # This function creates a map from baseline bitwidth to the func() result on that baseline model.
def applyOnBaseline(self, func, applyOnAlphasDistribution=False):
baselineBops = {}
# save current model filters curr_alpha_idx
modelFiltersIdx = [[filter.curr_alpha_idx for filter in layer.filters] for layer in self.layersList]
# iterate over model layers
for layer in self.layersList:
# we want to iterate only over MixedConvWithReLU filters layer
if isinstance(layer.filters[0], MixedConvWithReLU):
# get layer filters bitwidth list
layerBitwidths = layer.getAllBitwidths()
# iterate over bitwidth and calc bops for their uniform model
for idx, bitwidth in enumerate(layerBitwidths):
# calc only for bitwidths that are not in baselineBops dictionary
if bitwidth not in baselineBops:
# if we need to calc bops for bitwidth uniform model, then we have to set filters curr_alpha_idx
for layer2 in self.layersList:
# get layer bitwidth list
layerBitwidths2 = layer2.getAllBitwidths()
# find target bitwidth in bitwidth list
if bitwidth in layerBitwidths2:
idx = layerBitwidths2.index(bitwidth)
else:
# if it is a MixedConv layer, then modify the bitwidth we are looking for
modifiedBitwidth = (bitwidth[0], None)
idx = layerBitwidths2.index(modifiedBitwidth)
# set layers curr_alpha_idx to target bitwidth index
for filter in layer2.filters:
filter.curr_alpha_idx = idx
# update bops value in dictionary
baselineBops[bitwidth] = func()
# apply on current alphas distribution
if applyOnAlphasDistribution:
self.setFiltersByAlphas()
# α is greek alpha symbol in HTML
baselineBops['α'] = func()
# restore filters curr_alpha_idx
for layer, layerFiltersIdx in zip(self.layersList, modelFiltersIdx):
for filter, filterIdx in zip(layer.filters, layerFiltersIdx):
filter.curr_alpha_idx = filterIdx
return baselineBops
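    # Note: the method above temporarily forces every filter in the model to a single uniform
    # bitwidth, evaluates func() (e.g. countBops) in that configuration, and finally restores the
    # saved curr_alpha_idx values, so calling it has no lasting effect on the current architecture.
    # The returned dict maps each bitwidth tuple (and optionally the 'α' key) to its func() result.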
# calc bops of uniform models, based on filters ops bitwidth
def calcBaselineBops(self):
return self.applyOnBaseline(self.countBops)
# return top k operations per layer
def topOps(self, k):
top = []
for layer in self.layersList:
# calc weights from alphas and sort them
weights = F.softmax(layer.alphas, dim=-1)
wSorted, wIndices = weights.sort(descending=True)
# keep only top-k
wSorted = wSorted[:k]
wIndices = wIndices[:k]
# get layer bitwidths
bitwidths = layer.getAllBitwidths()
# add to top
top.append([(i, w.item(), layer.alphas[i], bitwidths[i]) for w, i in zip(wSorted, wIndices)])
return top
# create list of tuples (layer index, layer alphas)
def save_alphas_state(self):
return [(i, layer.alphas) for i, layer in enumerate(self.layersList)]
def load_alphas_state(self, state, loggerFuncs=[]):
for layerIdx, alphas in state:
layerAlphas = self.layersList[layerIdx].alphas
device = layerAlphas.device
layerAlphas.data = alphas.data.to(device)
logMsg = 'Loaded alphas from checkpoint'
# log message to all loggers
for f in loggerFuncs:
f(logMsg)
def __initAlphasDataFrame(self, saveFolder):
if saveFolder:
# update save path if saveFolder exists
self.alphasCsvFileName = '{}/{}'.format(saveFolder, self.alphasCsvFileName)
# init DataFrame cols
cols = ['Epoch', 'Batch']
cols += ['Layer_{}'.format(i) for i in range(self.nLayers())]
self.cols = cols
# init DataFrame
self.alphas_df = DataFrame([], columns=cols)
# set init data
data = ['init', 'init']
# save alphas data
self.save_alphas_to_csv(data)
# save alphas values to csv
def save_alphas_to_csv(self, data):
if self.alphas_df is not None:
data += [[round(e.item(), 5) for e in layer.alphas] for layer in self.layersList]
# create new row
d = DataFrame([data], columns=self.cols)
# add row
self.alphas_df = self.alphas_df.append(d)
# save DataFrame
self.alphas_df.to_csv(self.alphasCsvFileName)
def logDominantQuantizedOp(self, k, loggerFuncs=[]):
if (not loggerFuncs) or (len(loggerFuncs) == 0):
return
rows = [['Layer #', 'Alphas']]
alphaCols = ['Index', 'Ratio', 'Value', 'Bitwidth']
top = self.topOps(k=k)
for i, layerTop in enumerate(top):
layerRow = [alphaCols]
for idx, w, alpha, bitwidth in layerTop:
alphaRow = [idx, '{:.5f}'.format(w), '{:.5f}'.format(alpha), bitwidth]
# add alpha data row to layer data table
layerRow.append(alphaRow)
# add layer data table to model table as row
rows.append([i, layerRow])
# apply loggers functions
for f in loggerFuncs:
f(k, rows)
def printToFile(self, saveFolder):
logger = HtmlLogger(saveFolder, 'model')
layerIdxKey = 'Layer#'
nFiltersKey = 'Filters#'
bitwidthsKey = 'Bitwidths'
filterArchKey = 'Filter Architecture'
alphasKey = 'Alphas distribution'
logger.createDataTable('Model architecture', [layerIdxKey, nFiltersKey, bitwidthsKey, filterArchKey])
for layerIdx, layer in enumerate(self.layersList):
bitwidths = layer.getAllBitwidths()
dataRow = {layerIdxKey: layerIdx, nFiltersKey: layer.nFilters(), bitwidthsKey: bitwidths, filterArchKey: next(layer.opsList())}
logger.addDataRow(dataRow)
# log layers alphas distribution
self.logDominantQuantizedOp(len(bitwidths), loggerFuncs=[lambda k, rows: logger.addInfoTable(alphasKey, rows)])
def logForwardCounters(self, loggerFuncs):
if (not loggerFuncs) or (len(loggerFuncs) == 0):
self.resetForwardCounters()
return
rows = [['Layer #', 'Counters']]
counterCols = ['Prev idx', 'bitwidth', 'Counter']
for layerIdx, layer in enumerate(self.layersList):
filter = layer.filters[0]
# sum counters of all filters by indices
countersByIndices = [[0] * len(filter.opsForwardCounters[0]) for _ in range(len(filter.opsForwardCounters))]
for filter in layer.filters:
for i, counterList in enumerate(filter.opsForwardCounters):
for j, counter in enumerate(counterList):
countersByIndices[i][j] += counter
# reset filter counters
filter.resetOpsForwardCounters()
# collect layer counters to 2 arrays:
# counters holds the counters values
# indices holds the corresponding counter value indices
counters, indices = [], []
for i in range(len(countersByIndices)):
for j in range(len(countersByIndices[0])):
counters.append(countersByIndices[i][j])
indices.append((i, j))
# get layer bitwidths
bitwidths = layer.getAllBitwidths()
# for each layer, sort counters in descending order
layerRows = [counterCols]
countersTotal = 0
while len(counters) > 0:
# find max counter and print it
maxIdx = argmax(counters)
i, j = indices[maxIdx]
# add counter as new row
layerRows.append([i, bitwidths[j], counters[maxIdx]])
# update countersTotal
countersTotal += counters[maxIdx]
# remove max counter from lists
del counters[maxIdx]
del indices[maxIdx]
# add counters total row
layerRows.append(['Total', '', countersTotal])
# add layer row to model table
rows.append([layerIdx, layerRows])
# apply loggers functions
for f in loggerFuncs:
f(rows)
# def calcStatistics(self, statistics_queue):
# # prepare for collecting statistics, reset register_buffers values
# for layer in self.layersList:
# for op in layer.opsList:
# conv = op.getConv()
# # reset conv register_buffer values
# conv.layer_b = ones(1).cuda()
# conv.layer_basis = ones(1).cuda()
# conv.initial_clamp_value = ones(1).cuda()
# # get actquant
# actQuant = op.getReLU()
# if actQuant:
# # reset actquant register_buffer values
# actQuant.running_mean = zeros(1).cuda()
# actQuant.running_std = zeros(1).cuda()
# actQuant.clamp_val.data = zeros(1).cuda()
# # set actquant to statistics forward
# actQuant.forward = actQuant.statisticsForward
#
# # train for statistics
# criterion = CrossEntropyLoss().cuda()
# nBatches = 80
# self.eval()
# with no_grad():
# for step, (input, target) in enumerate(statistics_queue):
# if step >= nBatches:
# break
#
# output = self(input.cuda())
# criterion(output, target.cuda())
#
# # apply quantize class statistics functions
# for layerIdx, layer in enumerate(self.layersList):
# # concat layer feature maps together, in order to get initial_clamp_value identical to NICE
# # because initial_clamp_value is calculated based on feature maps weights values
# x = tensor([]).cuda()
# for op in layer.opsList:
# x = cat((x, op.getConv().weight), dim=0)
#
# for op in layer.opsList:
# clamp_value = op.quantize.basic_clamp(x)
# conv = op.getConv()
# conv.initial_clamp_value = clamp_value
# # restore actquant forward function
# actQuant = op.getReLU()
# # set actquant to standard forward
# if actQuant:
# op.quantize.get_act_max_value_from_pre_calc_stats([actQuant])
# actQuant.forward = actQuant.standardForward
#
# print('Layer [{}] - initial_clamp_value:[{}]'.format(layerIdx, conv.initial_clamp_value.item()))
#
# # for op in layer.opsList:
# # opModulesList = list(op.modules())
# # op.quantize.get_act_max_value_from_pre_calc_stats(opModulesList)
# # op.quantize.set_weight_basis(opModulesList, None)
# #
# # conv = op.getConv()
# # print(conv.initial_clamp_value)
# #
#
# # updates statistics in checkpoint, in order to avoid calculating statistics when loading model from checkpoint
# def updateCheckpointStatistics(self, checkpoint, path, statistics_queue):
# needToUpdate = ('updated_statistics' not in checkpoint) or (checkpoint['updated_statistics'] is not True)
# if needToUpdate:
# # quantize model
# self.quantizeUnstagedLayers()
# # change self.nLayersQuantCompleted so calcStatistics() won't quantize again
# nLayersQuantCompletedOrg = self.nLayersQuantCompleted
# self.nLayersQuantCompleted = self.nLayers()
# # load checkpoint weights
# self.load_state_dict(checkpoint['state_dict'])
# # calc weights statistics
# self.calcStatistics(statistics_queue)
# # update checkpoint
# checkpoint['state_dict'] = self.state_dict()
# checkpoint['updated_statistics'] = True
# # save updated checkpoint
# saveModel(checkpoint, path)
# # restore nLayersQuantCompleted
# self.nLayersQuantCompleted = nLayersQuantCompletedOrg
#
# return needToUpdate
# def __loadStatistics(self, filename):
# if exists(filename):
# # stats is a list of dicts per layer
# stats = loadModel(filename)
# print('Loading statistics')
#
# for i, layer in enumerate(self.layersList):
# # get layer dict
# layerStats = stats[i]
# # iterate over layer filters
# for filter in layer.filters:
# # iterate over filter modules
# for m in filter.modules():
# # create module type as string
# moduleType = '{}'.format(type(m))
# NICEprefix = "'NICE."
# if NICEprefix in moduleType:
# moduleType = moduleType.replace(NICEprefix, "'")
#
# # check if type string is in dict
# if moduleType in layerStats:
# # go over dict keys, which is the module variables
# for varName in layerStats[moduleType].keys():
# v = getattr(m, varName)
# # if variable has value in dict, assign it
# if v is not None:
# v.data = layerStats[moduleType][varName].data
# # select random alpha
# def chooseRandomPath(self):
# for l in self.layers:
# l.chooseRandomPath()
# # layerIdx, alphaIdx meaning: self.layersList[layerIdx].curr_alpha_idx = alphaIdx
# # def choosePathByAlphas(self, layerIdx=None, alphaIdx=None):
# def choosePathByAlphas(self):
# for l in self.layers:
# l.choosePathByAlphas()
#
# if (layerIdx is not None) and (alphaIdx is not None):
# layer = self.layersList[layerIdx]
# layer.curr_alpha_idx = alphaIdx
# def evalMode(self):
# for l in self.layers:
# l.evalMode()
#
# # calc bops ratio
# return self.calcBopsRatio()
# def uniformMode(self):
# for l in self.layersList:
# l.uniformMode(self._criterion.baselineBits)
#
# # calc bops ratio
# return self.calcBopsRatio()
# def turnOffAlphas(self):
# for layer in self.layersList:
# # turn off alphas gradients
# layer.alphas.requires_grad = False
#
# self.learnable_alphas = []
# def turnOnAlphas(self):
# self.learnable_alphas = []
# for layer in self.layersList:
# # turn on alphas gradients
# layer.alphas.requires_grad = True
# self.learnable_alphas.append(layer.alphas)
#
# for op in layer.getOps():
# # turn off noise in op
# op.noise = False
#
# ## ==== for tinyNet ====
# # # set pre & post quantization hooks, from now on we want to quantize these ops
# # op.register_forward_pre_hook(save_quant_state)
# # op.register_forward_hook(restore_quant_state)
# # convert current model to discrete, i.e. keep nOpsPerLayer optimal operations per layer
# def toDiscrete(self, nOpsPerLayer=1):
# for layer in self.layersList:
# # calc weights from alphas and sort them
# weights = F.softmax(layer.alphas, dim=-1)
# _, wIndices = weights.sort(descending=True)
# # update layer alphas
# layer.alphas = layer.alphas[wIndices[:nOpsPerLayer]]
# # layer.alphas = tensor(tensor(layer.alphas.tolist()).cuda(), requires_grad=True)
# layer.alphas = tensor(tensor(layer.alphas.tolist()).cuda())
# # take indices of ops we want to remove from layer
# wIndices = wIndices[nOpsPerLayer:]
# # convert to list
# wIndices = wIndices.tolist()
# # sort indices ascending
# wIndices.sort()
# # remove ops and corresponding bops from layer
# for w in reversed(wIndices):
# del layer.ops[w]
# del layer.bops[w]
# def loadBitwidthWeigths(self, stateDict, MaxBopsBits, bitwidth):
# # check idx of MaxBopsBits inside bitwidths
# maxBopsBitsIdx = bitwidth.index(MaxBopsBits)
# maxBopsStateDict = OrderedDict()
# opsKey = 'ops.'
# for key in stateDict.keys():
# # if operation is for max bops bits idx
# if opsKey in key:
# keyOp_num = key.split(opsKey)[1][0]
# if int(keyOp_num) == maxBopsBitsIdx:
# maxBopsKey = key.replace(opsKey + keyOp_num, opsKey + '0')
# maxBopsStateDict[maxBopsKey] = stateDict[key]
# else:
# maxBopsStateDict[key] = stateDict[key]
#
# self.load_state_dict(maxBopsStateDict)
# def _loss(self, input, target):
# totalLoss = 0.0
# nIter = min(self.nPerms, 1000)
# for _ in range(nIter):
# logits = self.forward(input)
#
# # calc alphas product
# alphasProduct = 1.0
# for layer in self.layersList:
# probs = F.softmax(layer.alphas)
# alphasProduct *= probs[layer.curr_alpha_idx]
#
# permLoss = alphasProduct * self._criterion(logits, target, self.countBops())
# # permLoss = self._criterion(logits, target, self.countBops()) / nIter
# permLoss.backward(retain_graph=True)
#
# totalLoss += permLoss.item()
#
# return totalLoss
# def _loss(self, input, target):
# # sum all paths losses * the path alphas multiplication
# totalLoss = 0.0
# nIter = min(self.nPerms, 1000)
# for _ in range(nIter):
# # for perm in product(*self.layersPerm):
# perm = [randint(0, len(layer.alphas) - 1) for layer in self.layersList]
# alphasProduct = 1.0
# # set perm index in each layer
# for i, p in enumerate(perm):
# layer = self.layersList[i]
# layer.curr_alpha_idx = p
# probs = F.softmax(layer.alphas)
# alphasProduct *= probs[p]
#
# logits = self.forward(input)
# # only the alphas are changing...
# permLoss = (alphasProduct * self._criterion(logits, target, self.countBops()))
# permLoss.backward(retain_graph=True)
# totalLoss += permLoss.item()
#
# # print('totalLoss:[{:.5f}]'.format(totalLoss))
# return totalLoss
#
# # logits = self.forward(input)
# # return self._criterion(logits, target, self.countBops())
# def _loss(self, input, target):
# # init how many samples per alpha
# nSamples = self.nSamples
# # init total loss
# totalLoss = 0.0
# # init loss samples list for ALL alphas
# allLossSamples = []
# for j, layer in enumerate(self.layersList):
# # turn off coin toss for this layer
# layer.alphas.requires_grad = False
# # init layer alphas gradient
# layerAlphasGrad = zeros(len(layer.alphas)).cuda()
# # calc layer alphas softmax
# probs = F.softmax(layer.alphas, dim=-1)
#
# for i, alpha in enumerate(layer.alphas):
# # select the specific alpha in this layer
# layer.curr_alpha_idx = i
# # init loss samples list
# alphaLossSamples = []
# for _ in range(nSamples):
# # forward through some path in model
# logits = self(input)
# # alphaLoss += self._criterion(logits, target, self.countBops()).detach()
# alphaLossSamples.append(self._criterion(logits, target, self.countBops()).detach())
#
# # add current alpha loss samples to all loss samples list
# allLossSamples.extend(alphaLossSamples)
# # calc alpha average loss
# alphaAvgLoss = sum(alphaLossSamples) / nSamples
# layerAlphasGrad[i] = alphaAvgLoss
# # add alpha loss to total loss
# totalLoss += (alphaAvgLoss * probs[i])
#
# # calc loss samples variance
# lossVariance = [((x - alphaAvgLoss) ** 2) for x in alphaLossSamples]
# lossVariance = sum(lossVariance) / (nSamples - 1)
# # add alpha loss average to statistics
# self.stats.containers[self.stats.alphaLossAvgKey][j][i].append(alphaAvgLoss.item())
# # add alpha loss variance to statistics
# self.stats.containers[self.stats.alphaLossVarianceKey][j][i].append(lossVariance.item())
#
# # turn in coin toss for this layer
# layer.alphas.requires_grad = True
# # set layer alphas gradient
# layer.alphas.grad = layerAlphasGrad
#
# # add gradNorm to statistics
# self.stats.containers[self.stats.gradNormKey][j].append(layerAlphasGrad.norm().item())
#
# # average total loss
# totalLoss /= self.nLayers()
# # calc all loss samples average
# nTotalSamples = len(allLossSamples)
# allLossSamplesAvg = sum(allLossSamples) / nTotalSamples
# # calc all loss samples variance
# allLossSamples = [((x - allLossSamplesAvg) ** 2) for x in allLossSamples]
# allLossSamplesVariance = (sum(allLossSamples) / (nTotalSamples - 1)).item()
# # add all samples average & loss variance to statistics
# self.stats.containers[self.stats.allSamplesLossAvgKey][0].append(allLossSamplesAvg)
# self.stats.containers[self.stats.allSamplesLossVarianceKey][0].append(allLossSamplesVariance)
#
# # subtract average total loss from every alpha gradient
# for layer in self.layersList:
# layer.alphas.grad -= totalLoss
# # calc layer alphas softmax
# probs = F.softmax(layer.alphas, dim=-1)
# # multiply each grad by its probability
# layer.alphas.grad *= probs
#
# return totalLoss
| [
"torch.nn.functional.softmax"
] | 1.0.0 | aqui-tna/darts-UNIQ | 293a27b104bc0f53c6093829d1184686b788fba9 |
1.0 | from itertools import groupby
from abc import abstractmethod
from torch import cat, chunk, tensor, zeros, int32
from torch.nn import ModuleList, BatchNorm2d, Conv2d
from torch.distributions.multinomial import Multinomial
from torch.nn import functional as F
from cnn.MixedFilter import MixedFilter
from cnn.block import Block
from UNIQ.quantize import check_quantization
from NICE.quantize import ActQuant
# collects stats from forward output
def collectStats(type, val):
funcs = [(lambda x: x.argmin(), lambda x: x.min()), (lambda x: '', lambda x: sum(x) / len(x)), (lambda x: x.argmax(), lambda x: x.max())]
res = [[['Filter#', filterFunc(val)], ['Value', '{:.5f}'.format(valueFunc(val))]] for filterFunc, valueFunc in funcs]
res = [type] + res
return res
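# A hypothetical usage sketch (added for clarity, not part of the original module)
# of what collectStats produces when val is a 1D per-filter tensor:
#
#     from torch import tensor
#     val = tensor([0.1, 0.5, 0.3])
#     collectStats('Max', val)
#     # -> ['Max',
#     #     [['Filter#', tensor(0)], ['Value', '0.10000']],   # filter with the minimum
#     #     [['Filter#', ''], ['Value', '0.30000']],          # average over filters
#     #     [['Filter#', tensor(2)], ['Value', '0.50000']]]   # filter with the maximum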
def postForward(self, _, output):
assert (False)
if self.quantized is True:
        # calc avg & max value per feature map for stats
layerMax = tensor([output.select(1, j).max() for j in range(output.size(1))])
layerAvg = tensor([(output.select(1, j).sum() / output.select(1, j).numel()) for j in range(output.size(1))])
# save min, avg & max values for stats
elements = [('Avg', layerAvg), ('Max', layerMax)]
self.forwardStats = [collectStats(type, val) for type, val in elements]
self.forwardStats.insert(0, ['Type', 'Min', 'Avg', 'Max'])
# for i, m in enumerate(layerMax):
# if m.item() <= 1E-5:
# filter = self.filters[i]
# conv = filter.ops[0][filter.curr_alpha_idx].op[0].weight
# self.forwardStats.append([['Filter#', i], ['MaxVal', m], ['conv weights', conv]])
else:
self.forwardStats = None
# alphas = [[-0.55621, -0.33438, 0.99768, -0.80023], [0.29986, 0.06659, 0.44075, -1.50035], [-0.10046, 0.33549, 0.64312, -1.57129],
# [0.4849, -0.3104, 0.74277, -1.61042], [0.78503, -0.93497, -0.94867], [0.09668, 0.11817, 0.20924, -1.11723],
# [0.01722, 0.46502, 0.33579, -1.51118], [0.04131, -0.74829, -0.39164], [0.16032, 0.38078, 0.15881, -1.39306]]
#
# alphaIdx = [0]
#
#
# def getAlphas():
# res = tensor(alphas[alphaIdx[0]]).cuda()
# alphaIdx[0] = (alphaIdx[0] + 1) % len(alphas)
# return res
class MixedLayer(Block):
def __init__(self, nFilters, createMixedFilterFunc, useResidual=False):
super(MixedLayer, self).__init__()
# create mixed filters
self.filters = ModuleList()
for _ in range(nFilters):
self.filters.append(createMixedFilterFunc())
# make sure mixed filters are subclasses of MixedFilter
assert (isinstance(self.filters[0], MixedFilter))
# init operations alphas (weights)
self.alphas = tensor((zeros(self.numOfOps())).cuda(), requires_grad=True)
# self.alphas = tensor(getAlphas(), requires_grad=True)
self.alphas = self.alphas.cuda()
# =========== change alphas distribution ==================
if self.numOfOps() > 1:
from math import log
filter = self.filters[0]
p = 1 / ((self.numOfOps() * 2) - 1)
logVal = p / (1 - p) * (self.numOfOps() - 1)
for i, op in enumerate(filter.opsList()):
opBitwidth = op.getBitwidth()
if opBitwidth == (8, 8) or opBitwidth == (8, None):
self.alphas.data[i].fill_(log(logVal))
# init filters current partition by alphas, i.e. how many filters are for each alpha, from each quantization
self.currFiltersPartition = [0] * self.numOfOps()
# # set filters distribution
# if self.numOfOps() > 1:
# self.setAlphas([0.3125, 0.3125, 0.1875, 0.125, 0.0625])
# self.setFiltersPartition()
# set forward function
self.forwardFunc = self.residualForward if useResidual else self.standardForward
# # register post forward hook
# self.register_forward_hook(postForward)
# self.forwardStats = None
# set UNIQ parameters
self.quantized = False
self.added_noise = False
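    # Derivation sketch for the alphas bias above (added comment, not original code),
    # assuming exactly one op matches the 8-bit condition: with a single logit set to
    # log(v) and the remaining N - 1 logits left at zero, softmax gives that op probability
    #     v / (v + (N - 1)).
    # Plugging in v = p / (1 - p) * (N - 1) with p = 1 / (2N - 1) makes that probability
    # exactly p, i.e. the 8-bit op starts at weight 1 / (2N - 1) instead of the uniform 1 / N:
    #
    #     N = 4; p = 1.0 / (2 * N - 1)            # hypothetical op count
    #     v = p / (1 - p) * (N - 1)               # equals 0.5 for any N
    #     assert abs(v / (v + N - 1) - p) < 1e-12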
def nFilters(self):
return len(self.filters)
def getLayers(self):
return [self]
def quantize(self, layerIdx):
assert (self.added_noise is False)
for op in self.opsList():
assert (op.noise is False)
assert (op.quant is False)
op.quant = True
op.quantizeFunc()
assert (check_quantization(op.getModule(Conv2d).weight) <= (2 ** op.bitwidth[0]))
# quantize activations during training
for m in op.modules():
if isinstance(m, ActQuant):
m.qunatize_during_training = True
self.quantized = True
print('quantized layer [{}] + quantize activations during training'.format(layerIdx))
def unQuantize(self, layerIdx):
assert (self.quantized is True)
assert (self.added_noise is False)
for op in self.opsList():
assert (op.quant is True)
op.quant = False
op.restore_state()
# remove activations quantization during training
for m in op.modules():
if isinstance(m, ActQuant):
m.qunatize_during_training = False
self.quantized = False
print('removed quantization in layer [{}] + removed activations quantization during training'.format(layerIdx))
# just turn on op.noise flag
# noise is being added in pre-forward hook
def turnOnNoise(self, layerIdx):
assert (self.quantized is False)
for op in self.opsList():
assert (op.noise is False)
op.noise = True
self.added_noise = True
print('turned on noise in layer [{}]'.format(layerIdx))
def turnOffNoise(self, layerIdx):
assert (self.quantized is False)
assert (self.added_noise is True)
for op in self.opsList():
assert (op.noise is True)
op.noise = False
self.added_noise = False
print('turned off noise in layer [{}]'.format(layerIdx))
# ratio is a list
def setAlphas(self, ratio):
self.alphas.data = tensor(ratio)
# set filters curr_alpha_idx based on partition tensor
# partition is IntTensor
def setFiltersPartition(self, partition):
assert (partition.sum().item() == self.nFilters())
# reset current filters partition by alphas
self.currFiltersPartition = [0] * self.numOfOps()
# update filters curr_alpha_idx
idx = 0
for i, r in enumerate(partition):
for _ in range(r):
self.filters[idx].curr_alpha_idx = i
self.currFiltersPartition[i] += 1
idx += 1
# set filters partition based on ratio
# ratio is a tensor
def __setFiltersPartitionFromRatio(self, ratio):
# calc partition
partition = (ratio * self.nFilters()).type(int32)
# fix last ratio value to sum to nFilters
if partition.sum().item() < self.nFilters():
partition[-1] = self.nFilters() - partition[:-1].sum().item()
self.setFiltersPartition(partition)
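    # Hypothetical walk-through of the rounding fix above: with 10 filters and
    # ratio = tensor([0.26, 0.26, 0.26, 0.22]), the int32 cast truncates to [2, 2, 2, 2]
    # (sum 8 < 10), so the last entry is raised to 10 - 6 = 4 and the final partition
    # [2, 2, 2, 4] again sums to nFilters().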
# set filters partition based on alphas ratio
def setFiltersPartitionByAlphas(self):
probs = F.softmax(self.alphas, dim=-1)
self.__setFiltersPartitionFromRatio(probs)
def getCurrentFiltersPartition(self):
return self.currFiltersPartition
# input_bitwidth is a list of bitwidth per feature map
def getBops(self, input_bitwidth):
bops = 0.0
# init bops map
bopsMap = {}
for f in self.filters:
bops += f.getBops(input_bitwidth, bopsMap)
return bops
# returns filters current op bitwidth
def getCurrentBitwidth(self):
# collect filters current bitwidths
bitwidths = [f.getCurrentBitwidth() for f in self.filters]
# group bitwidths
groups = groupby(bitwidths, lambda x: x)
# create a list of tuples [bitwidth, number of filters]
res = []
for _, g in groups:
g = list(g)
res.append([g[0], len(g)])
return res
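    # Note on the grouping above (explanatory comment, not original code):
    # itertools.groupby only merges *consecutive* equal items, so bitwidths ordered as
    # [(8, 8), (8, 8), (4, 4), (8, 8)] would yield [[(8, 8), 2], [(4, 4), 1], [(8, 8), 1]].
    # setFiltersPartition() assigns curr_alpha_idx in contiguous blocks, so in that case
    # the consecutive grouping is exact.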
# create a list of layer output feature maps bitwidth
def getCurrentOutputBitwidth(self):
outputBitwidth = [f.getCurrentOutputBitwidth() for f in self.filters]
return outputBitwidth
def opsList(self):
for filter in self.filters:
for op in filter.opsList():
yield op
def getAllBitwidths(self):
# it doesn't matter which filter we take, the attributes are the same in all filters
return self.filters[0].getAllBitwidths()
def numOfOps(self):
# it doesn't matter which filter we take, the attributes are the same in all filters
return self.filters[0].numOfOps()
def outputLayer(self):
return self
# select alpha based on alphas distribution
def choosePathByAlphas(self):
dist = Multinomial(total_count=self.nFilters(), logits=self.alphas)
partition = dist.sample().type(int32)
self.setFiltersPartition(partition)
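    # Minimal sketch of the sampling above (illustrative values, not original code):
    # Multinomial spreads nFilters() counts over numOfOps() categories, so the sampled
    # partition always sums to the number of filters, e.g.
    #
    #     from torch import zeros, int32
    #     from torch.distributions.multinomial import Multinomial
    #     partition = Multinomial(total_count=8, logits=zeros(3)).sample().type(int32)
    #     assert partition.sum().item() == 8      # e.g. tensor([3, 2, 3], dtype=torch.int32)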
@abstractmethod
def preResidualForward(self, x):
raise NotImplementedError('subclasses must override preResidualForward()!')
# operations to perform after adding residual
def postResidualForward(self, x):
out = x
# apply ReLU if exists
if self.filters[0].postResidualForward:
out = []
            # split x into per-filter chunks again
x = chunk(x, self.nFilters(), dim=1)
# apply selected op in each filter
for i, f in enumerate(self.filters):
res = f.postResidualForward(x[i])
out.append(res)
# concat filters output
out = cat(out, 1)
return out
def forward(self, x):
return self.forwardFunc(self, x)
# standard forward
@staticmethod
def standardForward(layer, x):
out = layer.preResidualForward(x)
out = layer.postResidualForward(out)
return out
# forward with residual
@staticmethod
def residualForward(layer, input):
x, residual = input
out = layer.preResidualForward(x)
# add residual
out += residual
out = layer.postResidualForward(out)
return out
class MixedLayerNoBN(MixedLayer):
def __init__(self, nFilters, createMixedFilterFunc, useResidual=False):
super(MixedLayerNoBN, self).__init__(nFilters, createMixedFilterFunc, useResidual)
# operations to perform before adding residual
def preResidualForward(self, x):
out = []
# apply selected op in each filter
for f in self.filters:
res = f(x)
out.append(res)
# concat filters output
out = cat(out, 1)
return out
class MixedLayerWithBN(MixedLayer):
def __init__(self, nFilters, createMixedFilterFunc, useResidual=False):
super(MixedLayerWithBN, self).__init__(nFilters, createMixedFilterFunc, useResidual)
# init batch norm
self.bn = BatchNorm2d(nFilters)
# perform the convolution operation
def forwardConv(self, x):
out = []
# apply selected op in each filter
for f in self.filters:
res = f(x)
out.append(res)
# concat filters output
out = cat(out, 1)
return out
# operations to perform before adding residual
def preResidualForward(self, x):
out = self.forwardConv(x)
# apply batch norm
out = self.bn(out)
return out
# bitwidth list is the same for all filters, therefore we can use the 1st filter list
# def getOutputBitwidthList(self):
# return self.filters[0].getOutputBitwidthList()
# def evalMode(self):
# pass
# # select random alpha
# def chooseRandomPath(self):
# pass
# # quantize activations during training
# def quantActOnTraining(self, layerIdx):
# assert (self.quantized is True)
# assert (self.added_noise is False)
#
# for op in self.opsList:
# for m in op.modules():
# if isinstance(m, ActQuant):
# m.qunatize_during_training = True
#
# print('turned on qunatize_during_training in layer [{}]'.format(layerIdx))
#
# # stop quantize activations during training
# def turnOnGradients(self, layerIdx):
# assert (self.quantized is False)
# assert (self.added_noise is False)
#
# for op in self.opsList:
# for m in op.modules():
# if isinstance(m, ActQuant):
# m.qunatize_during_training = False
#
# print('turned off qunatize_during_training in layer [{}]'.format(layerIdx))
| [
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.BatchNorm2d",
"torch.tensor",
"torch.nn.functional.softmax"
] | 1.0.0 | aqui-tna/darts-UNIQ | 293a27b104bc0f53c6093829d1184686b788fba9 |
1.0 | from torch import zeros, tensor, no_grad
from torch.nn import functional as F
from cnn.model_replicator import ModelReplicator, set_device
class RandomPath(ModelReplicator):
def __init__(self, model, modelClass, args, logger):
super(RandomPath, self).__init__(model, modelClass, args, logger)
def getModel(self, args):
return args[0]
def buildArgs(self, inputPerGPU, targetPerGPU, nSamplesPerModel):
args = ((cModel, inputPerGPU[gpu], targetPerGPU[gpu], nSamples, gpu)
for nSamples, (cModel, gpu) in zip(nSamplesPerModel, self.replications))
return args
def lossPerReplication(self, args):
cModel, input, target, nSamples, gpu = args
# switch to process GPU
set_device(gpu)
assert (cModel.training is False)
with no_grad():
# init total loss
totalLoss = 0.0
# init loss samples list for ALL alphas
allLossSamples = []
# init layers alphas grad
alphasGrad = []
# save stats data
gradNorm = []
alphaLossVariance = []
for layerIdx in layersIndices:
layer = cModel.layersList[layerIdx]
# turn off coin toss for this layer
layer.alphas.requires_grad = False
# init layer alphas gradient
layerAlphasGrad = zeros(len(layer.alphas)).cuda(gpu)
# calc layer alphas softmax
probs = F.softmax(layer.alphas, dim=-1)
for i, alpha in enumerate(layer.alphas):
# # select the specific alpha in this layer
# layer.curr_alpha_idx = i
# init loss samples list
alphaLossSamples = []
for _ in range(nSamples):
# choose path in model based on alphas distribution, while current layer alpha is [i]
cModel.choosePathByAlphas(layerIdx=layerIdx, alphaIdx=i)
# forward input in model
logits = cModel(input)
# alphaLoss += cModel._criterion(logits, target, cModel.countBops()).detach()
alphaLossSamples.append(cModel._criterion(logits, target, cModel.countBops()).detach())
# add current alpha loss samples to all loss samples list
allLossSamples.extend(alphaLossSamples)
# calc alpha average loss
alphaAvgLoss = sum(alphaLossSamples) / nSamples
layerAlphasGrad[i] = alphaAvgLoss
# add alpha loss to total loss
totalLoss += (alphaAvgLoss * probs[i])
# calc loss samples variance
lossVariance = [((x - alphaAvgLoss) ** 2) for x in alphaLossSamples]
lossVariance = sum(lossVariance) / (nSamples - 1)
# add alpha loss variance to statistics
alphaLossVariance.append((layerIdx, i, alphaAvgLoss.item(), lossVariance.item()))
                # turn coin toss back on for this layer
layer.alphas.requires_grad = True
# add layer alphas grad to container
alphasGrad.append(layerAlphasGrad)
# add gradNorm to statistics
gradNorm.append((layerIdx, layerAlphasGrad.norm().item()))
return alphasGrad, allLossSamples, layersIndices, totalLoss, gradNorm, alphaLossVariance
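    # Reading of the estimator above (explanatory comment added here; completed in
    # processResults below): for each layer, alphas.grad[i] ends up as
    #     softmax(alphas)[i] * (mean sampled loss with alpha i fixed - averaged total loss),
    # i.e. every architecture weight is pushed by how much better or worse than the
    # baseline its sampled paths performed, scaled by its current probability.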
def processResults(self, model, results):
stats = model.stats
# init total loss
totalLoss = tensor(0.0).cuda()
# init loss samples list for ALL alphas
allLossSamples = []
# process returned results
for alphasGrad, partialLossSamples, layersIndices, partialLoss, gradNorm, alphaLossVariance in results:
# add alphas loss samples to all loss samples list
allLossSamples.extend(partialLossSamples)
# calc total loss & total number of samples
totalLoss += partialLoss.to(totalLoss.device)
# update statistics
for layerIdx, v in gradNorm:
stats.containers[stats.gradNormKey][layerIdx].append(v)
for layerIdx, j, avg, variance in alphaLossVariance:
stats.containers[stats.alphaLossAvgKey][layerIdx][j].append(avg)
stats.containers[stats.alphaLossVarianceKey][layerIdx][j].append(variance)
# update layers alphas gradients
for layerAlphasGrads, layerIdx in zip(alphasGrad, layersIndices):
alphas = model.layersList[layerIdx].alphas
alphas.grad = layerAlphasGrads.to(alphas.device)
# average total loss
totalLoss /= model.nLayers()
# calc all loss samples average
nTotalSamples = len(allLossSamples)
allLossSamplesAvg = sum(allLossSamples) / nTotalSamples
# calc all loss samples variance
allLossSamples = [((x - allLossSamplesAvg) ** 2) for x in allLossSamples]
allLossSamplesVariance = (sum(allLossSamples) / (nTotalSamples - 1))
# add all samples loss average & variance to statistics
stats.containers[stats.allSamplesLossAvgKey][0].append(allLossSamplesAvg)
stats.containers[stats.allSamplesLossVarianceKey][0].append(allLossSamplesVariance)
# subtract average total loss from every alpha gradient
for layerAlphas in model.arch_parameters():
layerAlphas.grad -= totalLoss
# calc layer alphas softmax
probs = F.softmax(layerAlphas, dim=-1)
# multiply each grad by its probability
layerAlphas.grad *= probs
return totalLoss
# subtract average total loss from every alpha gradient
# for layer in model.layersList:
# layer.alphas.grad -= totalLoss
# # calc layer alphas softmax
# probs = F.softmax(layer.alphas, dim=-1)
# # multiply each grad by its probability
# layer.alphas.grad *= probs
| [
"torch.no_grad",
"torch.tensor",
"torch.nn.functional.softmax"
] | 1.0.0 | aqui-tna/darts-UNIQ | 293a27b104bc0f53c6093829d1184686b788fba9 |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
import warnings
import omegaconf
import torch
from mmf.common.dataset_loader import DatasetLoader
from mmf.common.registry import registry
from mmf.modules.metrics import Metrics
from mmf.trainers.base_trainer import BaseTrainer
from mmf.trainers.callbacks.checkpoint import CheckpointCallback
from mmf.trainers.callbacks.early_stopping import EarlyStoppingCallback
from mmf.trainers.callbacks.logistics import LogisticsCallback
from mmf.trainers.callbacks.lr_scheduler import LRSchedulerCallback
from mmf.trainers.core.callback_hook import TrainerCallbackHookMixin
from mmf.trainers.core.device import TrainerDeviceMixin
from mmf.trainers.core.evaluation_loop import TrainerEvaluationLoopMixin
from mmf.trainers.core.profiling import TrainerProfilingMixin
from mmf.trainers.core.reporting import TrainerReportingMixin
from mmf.trainers.core.training_loop import TrainerTrainingLoopMixin
from mmf.utils.build import build_model, build_optimizer
from mmf.utils.general import print_model_parameters
from omegaconf import DictConfig, OmegaConf
logger = logging.getLogger(__name__)
@registry.register_trainer("mmf")
class MMFTrainer(
TrainerCallbackHookMixin,
TrainerTrainingLoopMixin,
TrainerDeviceMixin,
TrainerEvaluationLoopMixin,
TrainerReportingMixin,
TrainerProfilingMixin,
BaseTrainer,
):
def __init__(self, config: DictConfig):
super().__init__(config)
def load(self):
super().load()
self.load_fp16_scaler()
# Callbacks
self.on_init_start()
        # Parallelize model
self.parallelize_model()
# Callbacks
self.on_init_end()
def configure_callbacks(self):
self.checkpoint_callback = CheckpointCallback(self.config, self)
self.early_stop_callback = EarlyStoppingCallback(self.config, self)
self.logistics_callback = LogisticsCallback(self.config, self)
self.lr_scheduler_callback = LRSchedulerCallback(self.config, self)
# Add callbacks for execution during events
self.callbacks.append(self.lr_scheduler_callback)
# checkpoint_callback needs to be called after lr_scheduler_callback so that
# lr_scheduler_callback._scheduler.step() happens before saving checkpoints
# (otherwise the saved last_epoch in scheduler would be wrong)
self.callbacks.append(self.checkpoint_callback)
self.callbacks.append(self.logistics_callback)
def load_datasets(self):
logger.info("Loading datasets")
self.dataset_loader = DatasetLoader(self.config)
self.dataset_loader.load_datasets()
self.train_dataset = self.dataset_loader.train_dataset
self.val_dataset = self.dataset_loader.val_dataset
self.test_dataset = self.dataset_loader.test_dataset
self.train_loader = self.dataset_loader.train_loader
self.val_loader = self.dataset_loader.val_loader
self.test_loader = self.dataset_loader.test_loader
def load_model(self):
logger.info("Loading model")
if self.config.model in self.config.model_config:
attributes = self.config.model_config[self.config.model]
else:
warnings.warn(
f"Model {self.config.model}'s config not present. "
+ "Continuing with empty config"
)
attributes = OmegaConf.create()
# Easy way to point to config for other model
if isinstance(attributes, str):
attributes = self.config.model_config[attributes]
with omegaconf.open_dict(attributes):
attributes.model = self.config.model
self.model = build_model(attributes)
self.model = self.model.to(self.device)
def load_optimizer(self):
logger.info("Loading optimizer")
self.optimizer = build_optimizer(self.model, self.config)
def load_metrics(self) -> None:
logger.info("Loading metrics")
metrics = self.config.evaluation.get("metrics", [])
self.metrics = Metrics(metrics)
self.metrics_params = self.metrics.required_params
def load_fp16_scaler(self):
if self.training_config.fp16:
assert (
torch.__version__ >= "1.6"
), "Using fp16 requires torch version >- 1.6"
assert self.device != torch.device("cpu"), "fp16 cannot be used on cpu"
set_torch_grad_scaler = True
if self.training_config.fp16 and self.distributed:
try:
from fairscale.optim.oss import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if isinstance(self.optimizer, OSS):
self.scaler = ShardedGradScaler()
set_torch_grad_scaler = False
logger.info("Using FairScale ShardedGradScaler")
except ImportError:
logger.info("Using Pytorch AMP GradScaler")
if set_torch_grad_scaler:
self.scaler = torch.cuda.amp.GradScaler(enabled=self.training_config.fp16)
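    # Illustrative sketch (not part of this trainer) of how the scaler created above is
    # typically consumed in an fp16 step; `batch`, `loss` and `optimizer` are placeholder
    # names, only the autocast/scaler calls are real torch.cuda.amp API:
    #
    #     with torch.cuda.amp.autocast(enabled=self.training_config.fp16):
    #         loss = self.model(batch)             # placeholder forward returning a scalar loss
    #     self.scaler.scale(loss).backward()       # backward on the scaled loss
    #     self.scaler.step(optimizer)              # unscales grads; skips step on inf/nan
    #     self.scaler.update()                     # adapts the scale for the next iteration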
def train(self):
logger.info("===== Model =====")
logger.info(self.model)
print_model_parameters(self.model)
if "train" not in self.run_type:
self.inference()
return
self.on_train_start()
self.training_loop()
self.on_train_end()
self.inference()
def inference(self):
dataset_type = []
if "val" in self.run_type:
dataset_type.append("val")
if any(rt in self.run_type for rt in ["inference", "test", "predict"]):
dataset_type.append("test")
for dataset in dataset_type:
if self.config.evaluation.predict:
self.on_prediction_start()
self.prediction_loop(dataset)
self.on_prediction_end()
else:
self.on_test_start()
logger.info(f"Starting inference on {dataset} set")
report, meter = self.evaluation_loop(dataset, use_tqdm=True)
self.on_test_end(report=report, meter=meter)
| [
"torch.cuda.amp.GradScaler",
"torch.device"
] | 1.6.0 | Yui010206/mmf | 01e7ccd664a4492f65ba10aeb3eeeafef62c3b87 |
1.1 | # Copyright 2019 FMR LLC <[email protected]>
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import torch
from textwiser.base import BaseFeaturizer
from textwiser.utils import convert, OutputType
class _BaseTransformation(BaseFeaturizer):
def __init__(self, wrap_list_input=True):
"""Initializes a Transformation.
Subclasses must call this __init__ method.
Parameters
----------
wrap_list_input : bool
If true, any list input to fit, forward, or fit_transform functions will
be stacked to a 2D tensor before the functions are called, and will be
converted back to a list before being returned.
"""
super(_BaseTransformation, self).__init__()
self.wrap_list_input = wrap_list_input
@property
def input_types(self):
return OutputType.tensor,
def _check_input(self, x):
if not isinstance(x, tuple(t.value for t in self.input_types)):
return convert(x, self.input_types[0])
return x
def _forward(self, x):
raise NotImplementedError("Transformations should implement the `_forward` method.")
def fit(self, x, y=None):
x = self._check_input(x)
if self.wrap_list_input:
if isinstance(x, list): # happens after WordEmbedding
x = torch.cat(x, 0)
self._fit(x, y)
def _wrap_list_input(self, fn, uses_y, x, y=None):
sizes = None
if isinstance(x, list): # happens after WordEmbedding
if len(x) == 0:
return []
sizes = [0]
sizes.extend([doc.shape[0] for doc in x])
x = torch.cat(x, 0)
vec = fn(x, y) if uses_y else fn(x)
if sizes:
cs = np.cumsum(sizes)
vec = [vec[cs[i]:cs[i + 1], :] for i in range(cs.shape[0] - 1)]
return vec
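    # Hypothetical walk-through of the split above: two documents with 2 and 3 word
    # vectors give sizes = [0, 2, 3], so cs = cumsum(sizes) = [0, 2, 5] and the stacked
    # (5, dim) output is sliced back into vec[0:2] and vec[2:5], restoring the original
    # list-of-documents structure.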
def forward(self, x):
x = self._check_input(x)
return self._wrap_list_input(self._forward, False, x) if self.wrap_list_input else self._forward(x)
def fit_transform(self, x, y=None):
x = self._check_input(x)
return self._wrap_list_input(self._fit_transform, True, x, y) if self.wrap_list_input else self._fit_transform(x, y)
def _fit_transform(self, x, y=None):
x = self._check_input(x)
self._fit(x, y)
return self._forward(x)
| [
"torch.cat"
] | 1.1.0 | vishalbelsare/textwiser | 2c5bdd73c26bd3fb7bd2f324f57d99233aa9c17f |
1.0 | import numpy as np
import pytest
import torch
from ignite.contrib.handlers.param_scheduler import (
ConcatScheduler,
CosineAnnealingScheduler,
LinearCyclicalScheduler,
LRScheduler,
ParamGroupScheduler,
PiecewiseLinear,
create_lr_scheduler_with_warmup,
)
from ignite.engine import Engine, Events
def test_linear_scheduler():
with pytest.raises(TypeError, match=r"Argument optimizer should be torch.optim.Optimizer"):
LinearCyclicalScheduler({}, "lr", 1, 0, cycle_size=0)
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.0)
with pytest.raises(ValueError, match=r"Argument cycle_size should be positive and larger than 1"):
LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=0)
with pytest.raises(ValueError, match=r"Argument cycle_size should be positive and larger than 1"):
LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=1)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10)
state_dict = scheduler.state_dict()
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 9, max_epochs=2)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4,
0.6,
0.8,
# Cycle 2
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4, # 0.6, 0.8,
],
)
)
scheduler.load_state_dict(state_dict)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10, cycle_mult=2)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 10, max_epochs=3)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4,
0.6,
0.8,
# Cycle 2
1.0,
0.9,
0.8,
0.7,
0.6,
0.5,
0.4,
0.3,
0.2,
0.1,
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
],
)
)
scheduler.load_state_dict(state_dict)
# With float cycle_size
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(
optimizer, "lr", start_value=1.2, end_value=0.2, cycle_size=10.00000012, cycle_mult=1.0
)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 9, max_epochs=2)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1
1.2,
1.0,
0.8,
0.6,
0.4,
0.2,
0.4,
0.6,
0.8,
1.0,
# Cycle 2
1.2,
1.0,
0.8,
0.6,
0.4,
0.2,
0.4,
0.6, # 0.8, 1.0,
],
)
)
scheduler.load_state_dict(state_dict)
def test_linear_scheduler_cycle_size_two():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, cycle_size=2)
data = [0] * 10
max_epochs = 2
simulated_values = LinearCyclicalScheduler.simulate_values(
num_events=len(data) * max_epochs, param_name="lr", start_value=1, end_value=0, cycle_size=2
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
)
)
assert lrs == pytest.approx([v for i, v in simulated_values])
def test_cosine_annealing_scheduler():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = CosineAnnealingScheduler(optimizer, "lr", 0, 1, 10)
state_dict = scheduler.state_dict()
data = [0] * 9
max_epochs = 2
simulated_values = CosineAnnealingScheduler.simulate_values(
num_events=len(data) * max_epochs, param_name="lr", start_value=0, end_value=1, cycle_size=10
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
0.0,
0.02447174185242318,
0.09549150281252627,
0.20610737385376332,
0.3454915028125263,
0.5,
0.6545084971874737,
0.7938926261462365,
0.9045084971874737,
0.9755282581475768,
0.0,
0.02447174185242318,
0.09549150281252627,
0.20610737385376332,
0.3454915028125263,
0.5,
0.6545084971874737,
0.7938926261462365, # 0.9045084971874737, 0.9755282581475768
],
)
)
scheduler.load_state_dict(state_dict)
assert lrs == pytest.approx([v for i, v in simulated_values])
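# The expected values above follow a half-cosine ramp (stated here for reference and
# derived from the numbers in this test rather than from the library source):
#     value(t) = start + (end - start) * (1 - cos(pi * t / cycle_size)) / 2
# e.g. with start=0, end=1, cycle_size=10: value(1) = (1 - cos(pi / 10)) / 2 ≈ 0.02447.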
def test_concat_scheduler_asserts():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
with pytest.raises(ValueError):
ConcatScheduler(schedulers=[], durations=[])
with pytest.raises(ValueError):
ConcatScheduler(schedulers=[scheduler_1], durations=[10])
with pytest.raises(TypeError):
ConcatScheduler(schedulers=[scheduler_1, 12], durations=[10])
with pytest.raises(ValueError):
ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=[10, 5])
with pytest.raises(ValueError):
ConcatScheduler(schedulers=[scheduler_1, scheduler_2, scheduler_2], durations=[15, 12.0])
with pytest.raises(ValueError):
ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations="abc")
with pytest.raises(ValueError):
ConcatScheduler.simulate_values(
num_events=123, schedulers=[scheduler_1, scheduler_2], durations=[15], param_names="abc"
)
def test_concat_scheduler_state_dict():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
durations = [10]
concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=False)
state_dict = concat_scheduler.state_dict()
assert state_dict["durations"] == durations
assert state_dict["_current_duration"] == durations[0]
assert state_dict["_scheduler_index"] == 0
for _ in range(20):
concat_scheduler(None, None)
concat_scheduler.load_state_dict(state_dict)
assert concat_scheduler.durations == durations
assert concat_scheduler._current_duration == durations[0]
assert id(concat_scheduler._current_scheduler) == id(scheduler_1)
with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"):
concat_scheduler.load_state_dict({"a": 1})
with pytest.raises(ValueError, match=r"Input state_dict contains 0 state_dicts of concatenated schedulers"):
concat_scheduler.load_state_dict({"schedulers": []})
def test_concat_scheduler_two_schedulers():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
def _test(duration_vals_as_np_int):
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=10)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
durations = [10]
if duration_vals_as_np_int:
durations = [np.int64(t) for t in durations]
concat_scheduler = ConcatScheduler(
schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=True
)
state_dict = concat_scheduler.state_dict()
data = [0] * 10
max_epochs = 2
simulated_values = ConcatScheduler.simulate_values(
num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2], durations=durations
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1 of the LinearCyclicalScheduler
1.0,
0.8,
0.6,
0.4,
0.2,
0.0,
0.2,
0.4,
0.6,
0.8,
# Cycle 1 of the CosineAnnealingScheduler
0.0,
0.02447174185242318,
0.09549150281252627,
0.20610737385376332,
0.3454915028125263,
0.5,
0.6545084971874737,
0.7938926261462365,
0.9045084971874737,
0.9755282581475768,
],
)
)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
assert lrs == pytest.approx([v for i, v in simulated_values])
concat_scheduler.load_state_dict(state_dict)
_test(duration_vals_as_np_int=False)
_test(duration_vals_as_np_int=True)
def test_concat_scheduler_two_linear():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.0, end_value=0.1, cycle_size=2)
scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.2, end_value=1.0, cycle_size=2)
durations = [5]
concat_scheduler = ConcatScheduler(schedulers=[scheduler_1, scheduler_2], durations=durations, save_history=True)
state_dict = concat_scheduler.state_dict()
assert concat_scheduler.get_param() == 0.0
data = [0] * 10
max_epochs = 2
simulated_values = ConcatScheduler.simulate_values(
num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2], durations=durations
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
# first LinearCyclicalScheduler
0.0,
0.1,
0.0,
0.1,
0.0,
# second LinearCyclicalScheduler
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
1.0,
0.2,
],
)
)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
assert lrs == pytest.approx([v for i, v in simulated_values])
concat_scheduler.load_state_dict(state_dict)
def test_concat_scheduler_3_schedulers():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.5, cycle_size=20)
scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.5, end_value=0.45, cycle_size=10)
scheduler_3 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.5, end_value=0.0, cycle_size=20)
durations = [10, 5]
concat_scheduler = ConcatScheduler(
schedulers=[scheduler_1, scheduler_2, scheduler_3], durations=durations, save_history=True
)
state_dict = concat_scheduler.state_dict()
data = [0] * 10
max_epochs = 2
simulated_values = ConcatScheduler.simulate_values(
num_events=len(data) * max_epochs, schedulers=[scheduler_1, scheduler_2, scheduler_3], durations=durations
)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, concat_scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == list(
map(
pytest.approx,
[
# Cycle 1 of the first LinearCyclicalScheduler
1.0,
0.95,
0.9,
0.85,
0.8,
0.75,
0.7,
0.65,
0.6,
0.55,
# Cycle 1 of the second LinearCyclicalScheduler
0.5,
0.49,
0.48,
0.47,
0.46,
# Cycle 1 of the third LinearCyclicalScheduler
0.5,
0.45,
0.4,
0.35,
0.3,
],
)
)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
assert lrs == pytest.approx([v for i, v in simulated_values])
concat_scheduler.load_state_dict(state_dict)
def test_save_param_history():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
scheduler = LinearCyclicalScheduler(optimizer, "lr", 1, 0, 10, save_history=True)
lrs = []
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
assert not hasattr(trainer.state, "param_history")
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
trainer.run([0] * 10, max_epochs=2)
state_lrs = trainer.state.param_history["lr"]
assert len(state_lrs) == len(lrs)
# Unpack singleton lists
assert [group[0] for group in state_lrs] == lrs
def test_lr_scheduler_asserts():
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
with pytest.raises(ValueError):
LRScheduler(lr_scheduler)
with pytest.raises(ValueError):
LRScheduler.simulate_values(num_events=100, lr_scheduler=lr_scheduler)
with pytest.raises(TypeError):
LRScheduler(123)
def test_lr_scheduler():
def _test(torch_lr_scheduler_cls, **kwargs):
tensor = torch.zeros([1], requires_grad=True)
optimizer1 = torch.optim.SGD([tensor], lr=0.01)
optimizer2 = torch.optim.SGD([tensor], lr=0.01)
opt_state_dict1 = optimizer1.state_dict()
opt_state_dict2 = optimizer2.state_dict()
torch_lr_scheduler1 = torch_lr_scheduler_cls(optimizer=optimizer1, **kwargs)
scheduler = LRScheduler(torch_lr_scheduler1)
state_dict1 = scheduler.state_dict()
torch_lr_scheduler2 = torch_lr_scheduler_cls(optimizer=optimizer2, **kwargs)
state_dict2 = torch_lr_scheduler2.state_dict()
def dummy_update(engine, batch):
optimizer1.step()
optimizer2.step()
trainer = Engine(dummy_update)
@trainer.on(Events.ITERATION_STARTED)
def save_lr(engine):
lrs.append(optimizer1.param_groups[0]["lr"])
@trainer.on(Events.ITERATION_STARTED)
def save_true_lr(engine):
lrs_true.append(optimizer2.param_groups[0]["lr"])
@trainer.on(Events.ITERATION_COMPLETED)
def torch_lr_scheduler_step(engine):
torch_lr_scheduler2.step()
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
for _ in range(2):
lrs = []
lrs_true = []
data = [0] * 10
max_epochs = 2
trainer.run(data, max_epochs=max_epochs)
assert lrs_true == pytest.approx(lrs), "{}: {} ({}) vs {} ({})".format(
_, lrs_true, len(lrs_true), lrs, len(lrs)
)
optimizer1.load_state_dict(opt_state_dict1)
scheduler.load_state_dict(state_dict1)
optimizer2.load_state_dict(opt_state_dict2)
torch_lr_scheduler2.load_state_dict(state_dict2)
optimizer3 = torch.optim.SGD([tensor], lr=0.01)
torch_lr_scheduler3 = torch_lr_scheduler_cls(optimizer=optimizer3, **kwargs)
simulated_values = LRScheduler.simulate_values(
num_events=len(data) * max_epochs, lr_scheduler=torch_lr_scheduler3
)
assert lrs == pytest.approx([v for i, v in simulated_values])
_test(torch.optim.lr_scheduler.StepLR, step_size=5, gamma=0.5)
_test(torch.optim.lr_scheduler.ExponentialLR, gamma=0.78)
# test _replicate_lr_scheduler
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.01)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.78)
init_lr_scheduler_state = dict(lr_scheduler.state_dict())
copy_lr_scheduler = LRScheduler._replicate_lr_scheduler(lr_scheduler)
for _ in range(10):
optimizer.step()
lr_scheduler.step()
assert copy_lr_scheduler.state_dict() == init_lr_scheduler_state
with pytest.raises(TypeError):
LRScheduler._replicate_lr_scheduler(12)
def test_piecewiselinear_asserts():
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
with pytest.raises(ValueError):
PiecewiseLinear(optimizer, "lr", milestones_values=[])
with pytest.raises(ValueError):
PiecewiseLinear(optimizer, "lr", milestones_values=[(0.5,)])
with pytest.raises(ValueError):
PiecewiseLinear(optimizer, "lr", milestones_values=[(10, 0.5), (0.6,)])
with pytest.raises(ValueError):
PiecewiseLinear(optimizer, "lr", milestones_values=[(10, 0.5), (5, 0.6)])
def test_piecewiselinear():
def _test(milestones_as_np_int):
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0)
milestones_values = [(5, 0.5), (15, 1.0), (25, 0.0), (35, 1.0), (40, 0.5)]
if milestones_as_np_int:
milestones_values = [(np.int64(t), v) for t, v in milestones_values]
scheduler = PiecewiseLinear(optimizer, "lr", milestones_values=milestones_values)
state_dict = scheduler.state_dict()
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
trainer.add_event_handler(Events.ITERATION_COMPLETED, save_lr)
for _ in range(2):
lrs = []
trainer.run([0] * 25, max_epochs=2)
assert lrs == list(
map(
pytest.approx,
[
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.55,
0.6,
0.65,
0.7,
0.75,
0.8,
0.85,
0.9,
0.95,
1.0,
0.9,
0.8,
0.7,
0.6,
0.5,
0.4,
0.3,
0.2,
0.1,
0.0,
0.1,
0.2,
0.3,
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
1.0,
0.9,
0.8,
0.7,
0.6,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
],
)
)
scheduler.load_state_dict(state_dict)
_test(milestones_as_np_int=True)
_test(milestones_as_np_int=False)
def test_simulate_and_plot_values():
import matplotlib
matplotlib.use("Agg")
def _test(scheduler_cls, **scheduler_kwargs):
optimizer = None
event = Events.ITERATION_STARTED
if scheduler_cls == LRScheduler:
scheduler_kwargs["optimizer"] = scheduler_kwargs["lr_scheduler"].optimizer
optimizer = scheduler_kwargs["optimizer"]
event = Events.ITERATION_COMPLETED
elif scheduler_cls == ConcatScheduler:
optimizer = scheduler_kwargs["optimizer"]
del scheduler_kwargs["optimizer"]
else:
tensor = torch.zeros([1], requires_grad=True)
scheduler_kwargs["optimizer"] = torch.optim.SGD([tensor], lr=0.1)
optimizer = scheduler_kwargs["optimizer"]
max_epochs = 2
data = [0] * 10
# simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
scheduler = scheduler_cls(**scheduler_kwargs)
lrs = []
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(event, scheduler)
trainer.add_event_handler(Events.ITERATION_STARTED, save_lr)
trainer.run(data, max_epochs=max_epochs)
# assert lrs == pytest.approx([v for i, v in simulated_values])
if scheduler_cls == LRScheduler or scheduler_cls == ConcatScheduler:
# As internal state of torch lr scheduler has been changed the following checks will fail
return
# reexecute to check if no internal changes
# simulated_values = scheduler_cls.simulate_values(num_events=len(data) * max_epochs,
# save_history=True, # this will be removed
# **scheduler_kwargs)
# assert lrs == pytest.approx([v for i, v in simulated_values])
# launch plot values
scheduler_cls.plot_values(num_events=len(data) * max_epochs, **scheduler_kwargs)
# LinearCyclicalScheduler
_test(LinearCyclicalScheduler, param_name="lr", start_value=1.0, end_value=0.0, cycle_size=10)
# CosineAnnealingScheduler
_test(CosineAnnealingScheduler, param_name="lr", start_value=1.0, end_value=0.0, cycle_size=10)
# LRScheduler
tensor = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.1)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.5)
_test(LRScheduler, lr_scheduler=torch_lr_scheduler)
# ConcatScheduler = [LinearCyclicalScheduler, CosineAnnealingScheduler]
scheduler_1 = LinearCyclicalScheduler(optimizer, "lr", start_value=1.0, end_value=0.0, cycle_size=20)
scheduler_2 = CosineAnnealingScheduler(optimizer, "lr", start_value=0.0, end_value=1.0, cycle_size=10)
durations = [10]
_test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations)
# ConcatScheduler = [LinearCyclicalScheduler, LRScheduler]
tensor = torch.ones([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.001)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=1.5)
scheduler_1 = LRScheduler(torch_lr_scheduler)
scheduler_2 = LinearCyclicalScheduler(optimizer, "lr", start_value=0.1, end_value=0.0, cycle_size=10)
durations = [10]
_test(ConcatScheduler, optimizer=optimizer, schedulers=[scheduler_1, scheduler_2], durations=durations)
# PiecewiseLinear
tensor = torch.ones([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.001)
_test(
PiecewiseLinear,
optimizer=optimizer,
param_name="lr",
milestones_values=[(10, 0.5), (20, 0.45), (21, 0.3), (30, 0.1), (40, 0.1)],
)
def test_create_lr_scheduler_with_warmup():
with pytest.raises(TypeError, match=r"Argument lr_scheduler should be a subclass of"):
create_lr_scheduler_with_warmup(12, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=10)
t1 = torch.zeros([1], requires_grad=True)
# A) opt lr != warmup_end_value
optimizer = torch.optim.SGD([t1], lr=0.2)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
with pytest.raises(ValueError, match=r"Argument warmup_duration should be at least 2 events"):
create_lr_scheduler_with_warmup(
torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration=1
)
with pytest.raises(ValueError, match=r"Argument warmup_duration should be at least 2 events"):
create_lr_scheduler_with_warmup(
torch_lr_scheduler, warmup_start_value=0.0, warmup_end_value=0.1, warmup_duration="abc"
)
with pytest.raises(TypeError, match=r"Argument output_simulated_values should be a list of None"):
simulated_values = ()
create_lr_scheduler_with_warmup(
torch_lr_scheduler,
warmup_start_value=0.0,
warmup_end_value=0.1,
warmup_duration=10,
output_simulated_values=simulated_values,
)
def _test(lr_scheduler, optimizer, warmup_start_value, warmup_end_value, warmup_duration, warmup_end_next_value):
num_iterations = 10
max_epochs = 20
simulated_values = [None] * (num_iterations * max_epochs)
scheduler = create_lr_scheduler_with_warmup(
lr_scheduler,
warmup_start_value=warmup_start_value,
warmup_end_value=warmup_end_value,
warmup_duration=warmup_duration,
output_simulated_values=simulated_values,
)
if warmup_end_value is None:
warmup_end_value = optimizer.param_groups[0]["lr"]
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
@trainer.on(Events.ITERATION_STARTED)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
data = [0] * num_iterations
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == pytest.approx([v for i, v in simulated_values])
assert lrs[0] == pytest.approx(warmup_start_value), "lrs={}".format(lrs[: warmup_duration + num_iterations])
assert lrs[warmup_duration - 1] == pytest.approx(warmup_end_value), "lrs={}".format(
lrs[: warmup_duration + num_iterations]
)
assert lrs[warmup_duration] == pytest.approx(warmup_end_next_value), "lrs={}".format(
lrs[: warmup_duration + num_iterations]
)
scheduler.load_state_dict(state_dict)
t1 = torch.zeros([1], requires_grad=True)
# A) opt lr != warmup_end_value
optimizer = torch.optim.SGD([t1], lr=0.2)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
_test(torch_lr_scheduler, optimizer, 0.01, 0.05, 10, 0.2)
optimizer = torch.optim.SGD([t1], lr=0.2)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
_test(torch_lr_scheduler, optimizer, 0.01, 0.05, 2, 0.2)
# B) opt lr == warmup_end_value
optimizer = torch.optim.SGD([t1], lr=0.2)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
_test(torch_lr_scheduler, optimizer, 0.01, 0.2, 10, 0.2 * 0.98)
optimizer = torch.optim.SGD([t1], lr=0.2)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
_test(torch_lr_scheduler, optimizer, 0.01, 0.2, 2, 0.2 * 0.98)
# C) lr_scheduler start_value != warmup_end_value
t1 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([t1], lr=0.0)
lr_scheduler = LinearCyclicalScheduler(
optimizer=optimizer, param_name="lr", start_value=0.8, end_value=0.0, cycle_size=10
)
_test(lr_scheduler, optimizer, 0.01, 0.05, 10, 0.8)
optimizer = torch.optim.SGD([t1], lr=0.0)
lr_scheduler = LinearCyclicalScheduler(
optimizer=optimizer, param_name="lr", start_value=0.8, end_value=0.0, cycle_size=10
)
_test(lr_scheduler, optimizer, 0.01, 0.05, 2, 0.8)
# D) lr_scheduler start_value == warmup_end_value
t1 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([t1], lr=0.0)
lr_scheduler = LinearCyclicalScheduler(
optimizer=optimizer, param_name="lr", start_value=0.8, end_value=0.0, cycle_size=10
)
_test(lr_scheduler, optimizer, 0.01, 0.8, 10, 0.8 - (0.8 / 5.0))
optimizer = torch.optim.SGD([t1], lr=0.0)
lr_scheduler = LinearCyclicalScheduler(
optimizer=optimizer, param_name="lr", start_value=0.8, end_value=0.0, cycle_size=10
)
_test(lr_scheduler, optimizer, 0.01, 0.8, 2, 0.8 - (0.8 / 5.0))
# E) warmup_end_value is None: fall back to case B)
optimizer = torch.optim.SGD([t1], lr=0.2)
torch_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=0.98)
_test(torch_lr_scheduler, optimizer, 0.01, None, 10, 0.2 * 0.98)
def test_create_lr_scheduler_with_warmup_on_combined_scheduler():
# Test with a complex scheduler
def _test(save_history):
tensor = torch.ones([1], requires_grad=True)
optimizer = torch.optim.SGD([tensor], lr=0.001)
max_epochs = 25
lr_max_value = 0.4
num_iterations_per_epoch = 128
num_iterations = max_epochs * num_iterations_per_epoch
warmup_duration = 5 * num_iterations_per_epoch
cooldown_duration = 5 * num_iterations_per_epoch
scheduler_1 = LinearCyclicalScheduler(
optimizer,
"lr",
start_value=lr_max_value,
end_value=lr_max_value * 0.9,
cycle_size=(num_iterations - warmup_duration - cooldown_duration) * 2,
)
scheduler_2 = LinearCyclicalScheduler(
optimizer, "lr", start_value=lr_max_value, end_value=0.0, cycle_size=cooldown_duration * 2
)
lr_scheduler = ConcatScheduler(
schedulers=[scheduler_1, scheduler_2],
durations=[num_iterations - warmup_duration - cooldown_duration],
save_history=False,
)
lr_values = [None] * num_iterations
scheduler = create_lr_scheduler_with_warmup(
lr_scheduler,
warmup_start_value=0.0,
warmup_end_value=lr_max_value,
warmup_duration=warmup_duration,
save_history=save_history,
output_simulated_values=lr_values,
)
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
@trainer.on(Events.ITERATION_COMPLETED)
def save_lr(engine):
lrs.append(optimizer.param_groups[0]["lr"])
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
data = [0] * num_iterations_per_epoch
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert lrs == pytest.approx([v for i, v in lr_values])
if save_history:
param_history = trainer.state.param_history["lr"]
assert lrs == pytest.approx([v[0] for v in param_history])
scheduler.load_state_dict(state_dict)
_test(save_history=False)
_test(save_history=True)
def test_create_lr_scheduler_with_warmup_with_real_model(dummy_model_factory):
model = dummy_model_factory(with_grads=False, with_frozen_layer=False)
init_lr = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=init_lr)
scaled_lr = 0.02
warmup_duration = 5
step_size = 2
gamma = 0.97
output_simulated_values = [None] * 50
create_lr_scheduler_with_warmup(
torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma),
warmup_start_value=0.0,
warmup_end_value=scaled_lr,
warmup_duration=warmup_duration,
output_simulated_values=output_simulated_values,
)
assert output_simulated_values[0] == [0, 0.0]
assert output_simulated_values[warmup_duration - 1] == [warmup_duration - 1, scaled_lr]
assert output_simulated_values[warmup_duration] == [warmup_duration, init_lr]
v = [warmup_duration + step_size, init_lr * gamma]
assert output_simulated_values[warmup_duration + step_size] == v
def test_param_group_scheduler_asserts():
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler1 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=0, start_value=1.0, end_value=0.0, cycle_size=10
)
lr_scheduler2 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=1, start_value=1.0, end_value=0.0, cycle_size=10
)
with pytest.raises(ValueError):
ParamGroupScheduler(schedulers=[0, 1, 2], names=["a", "b", "c"])
with pytest.raises(ValueError):
ParamGroupScheduler(schedulers=[lr_scheduler1, "2"], names=["a", "b"])
with pytest.raises(ValueError):
ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names="ab")
with pytest.raises(ValueError):
ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=["a"])
scheduler = ParamGroupScheduler(schedulers=[lr_scheduler1, lr_scheduler2], names=["a", "b"])
with pytest.raises(ValueError, match=r"Required state attribute 'schedulers' is absent in provided state_dict"):
scheduler.load_state_dict({"a": 1})
with pytest.raises(ValueError, match=r"Input state_dict contains 0 state_dicts of param group schedulers"):
scheduler.load_state_dict({"schedulers": []})
with pytest.raises(
ValueError, match=r"Name of scheduler from input state dict does not " r"correspond to required one"
):
scheduler.load_state_dict({"schedulers": [("a", lr_scheduler1.state_dict()), ("bad_name", {})]})
def test_param_group_scheduler():
def _test(lr_schedulers, optimizer):
num_iterations = 10
max_epochs = 20
scheduler = ParamGroupScheduler(lr_schedulers, names=["s_{}".format(i) for i in range(len(lr_schedulers))])
state_dict = scheduler.state_dict()
trainer = Engine(lambda engine, batch: None)
@trainer.on(Events.ITERATION_COMPLETED)
def save_lr(engine):
lrs.append((optimizer.param_groups[0]["lr"], optimizer.param_groups[1]["lr"]))
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
data = [0] * num_iterations
for _ in range(2):
lrs = []
trainer.run(data, max_epochs=max_epochs)
assert [lr[0] for lr in lrs] == pytest.approx([lr[1] for lr in lrs])
scheduler.load_state_dict(state_dict)
t1 = torch.zeros([1], requires_grad=True)
t2 = torch.zeros([1], requires_grad=True)
optimizer = torch.optim.SGD([{"params": t1, "lr": 0.1}, {"params": t2, "lr": 0.1}])
lr_scheduler1 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=0, start_value=1.0, end_value=0.0, cycle_size=10
)
lr_scheduler2 = LinearCyclicalScheduler(
optimizer, "lr", param_group_index=1, start_value=1.0, end_value=0.0, cycle_size=10
)
_test([lr_scheduler1, lr_scheduler2], optimizer)
| [
"torch.zeros",
"torch.optim.lr_scheduler.StepLR",
"torch.optim.SGD",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.ones"
] | 1.0 | jonrbates/ignite | 15eeb8791a2e0c2f55265e1f6b91f91dc35286c5 |
1.0 | import torch
from ignite.contrib.metrics.regression._base import _BaseRegression
from ignite.exceptions import NotComputableError
class GeometricMeanAbsoluteError(_BaseRegression):
r"""
Calculates the Geometric Mean Absolute Error.
:math:`\text{GMAE} = \exp(\frac{1}{n}\sum_{j=1}^n\ln(|A_j - P_j|))`
where :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
More details can be found in `Botchkarev 2018`__.
- `update` must receive output of the form `(y_pred, y)` or `{'y_pred': y_pred, 'y': y}`.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
__ https://arxiv.org/abs/1809.03006
"""
def reset(self):
self._sum_of_errors = 0.0
self._num_examples = 0
def _update(self, output):
y_pred, y = output
errors = torch.log(torch.abs(y.view_as(y_pred) - y_pred))
self._sum_of_errors += torch.sum(errors)
self._num_examples += y.shape[0]
def compute(self):
if self._num_examples == 0:
raise NotComputableError(
"GeometricMeanAbsoluteError must have at " "least one example before it can be computed."
)
return torch.exp(self._sum_of_errors / self._num_examples).item()
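# --- Illustrative usage sketch (added for clarity; not part of the original ignite module) ---
# It assumes the standard ignite Metric protocol (reset / update / compute) provided by the
# base classes; the tensors below are made-up example values.
_example_gmae = GeometricMeanAbsoluteError()
_example_gmae.reset()
_example_gmae.update((torch.tensor([1.0, 2.0, 3.0]), torch.tensor([1.5, 2.5, 2.0])))
_example_gmae_value = _example_gmae.compute()  # geometric mean of the absolute errors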
| [
"torch.exp",
"torch.sum"
] | 1.0 | jonrbates/ignite | 15eeb8791a2e0c2f55265e1f6b91f91dc35286c5 |
1.1 | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
from .utils import weight_reduce_loss
from icecream import ic
def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
# element-wise losses
loss = F.cross_entropy(pred, label, reduction='none')
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_binary_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds] - 1] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None):
if pred.dim() != label.dim():
label, weight = _expand_binary_labels(label, weight, pred.size(-1))
# weighted element-wise losses
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, reduction='mean')[None]
@LOSSES.register_module
class CrossEntropyLoss(nn.Module):
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
loss_weight=1.0):
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
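# --- Illustrative usage sketch (added for clarity; not part of the original mmdetection module) ---
# Plain softmax cross-entropy over 4 random samples and 3 classes. It assumes
# `weight_reduce_loss` behaves as in mmdetection (mean reduction when no weight
# or avg_factor is given); the tensors are arbitrary example values.
_example_criterion = CrossEntropyLoss(use_sigmoid=False, reduction='mean', loss_weight=1.0)
_example_scores = torch.randn(4, 3)
_example_labels = torch.tensor([0, 2, 1, 1])
_example_loss = _example_criterion(_example_scores, _example_labels)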
| [
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nonzero",
"torch.nn.functional.cross_entropy",
"torch.arange"
] | 1.1 | cizhenshi/mmdetection | b0fe89677020ebe9e6a736b98d3e791ca0e6536d |
1.4 | import argparse
import os
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from models import curves_pam
from utils import utils, alignment, data
import models
import definitions
import copy
parser = argparse.ArgumentParser(description='Trains a curve between two neural networks using PAM.')
parser.add_argument('--dir', type=str, default='model_dicts/curve_pam_models/', metavar='DIR',
help='directory for saving curve models (default: model_dicts/curve_pam_models/)')
parser.add_argument('--dir2', type=str, default='model_data/training/curve_pam_models/', metavar='DIR',
help='directory for saving curve models data (default: model_data/training/curve_pam_models/)')
parser.add_argument('--data_path', type=str, default='data/', metavar='PATH',
help='path to datasets location (default: data/)')
parser.add_argument('--dir_models', type=str, default='model_dicts/basic_models/', metavar='ENDPOINTS',
help='directory to model dicts for the curve endpoints. (default: model_dicts/basic_models/)')
parser.add_argument('--dir_alignment', type=str, default='model_dicts/paired_models/', metavar='DIR',
help='directory to alignments between the endpoint models (default: model_dicts/paired_models/)')
parser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',
help='dataset name (default: CIFAR10)')
parser.add_argument('--use_test', action='store_true', default=True,
help='switches between validation and test set (default: True)')
parser.add_argument('--transform', type=str, default='TinyTen', metavar='TRANSFORM',
help='transform name (default: TinyTen)')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='input batch size (default: 128)')
parser.add_argument('--num-workers', type=int, default=4, metavar='N',
help='number of workers (default: 4)')
parser.add_argument('--model', type=str, default='TinyTen', metavar='MODEL',
help='model name (default: TinyTen)')
parser.add_argument('--curve', type=str, default='Bezier', metavar='CURVE',
help='curve type to use (default: Bezier)')
parser.add_argument('--num_bends', type=int, default=3, metavar='N',
help='number of curve bends (default: 3)')
parser.add_argument('--fix_start', dest='fix_start', action='store_true', default=True,
help='fix start point (default: True)')
parser.add_argument('--fix_end', dest='fix_end', action='store_true', default=True,
help='fix end point (default: True)')
parser.set_defaults(init_linear=True)
parser.add_argument('--init_linear_off', dest='init_linear', action='store_false',
help='turns off linear initialization of intermediate points (default: on)')
parser.add_argument('--resume', type=str, default=None, metavar='CKPT',
help='checkpoint to resume training from (default: None)')
parser.add_argument('--outer_iters', type=int, default=1, metavar='N',
help='number of PAM iterations to train (default: 1)')
parser.add_argument('--inner_iters_perm', type=int, default=20, metavar='N',
help='number of epochs to train permutation for each subiteration. (default: 20)')
parser.add_argument('--inner_iters_phi', type=int, default=250, metavar='N',
help='number of epochs to train curve parameters for each subiteration. (default: 250)')
parser.add_argument('--save_freq', type=int, default=270, metavar='N',
help='save frequency (default: 270)')
parser.add_argument('--lr', type=float, default=1E-1, metavar='LR',
help='initial learning rate (default: 0.1)')
parser.add_argument('--wd', type=float, default=5e-4, metavar='WD',
help='weight decay (default: 5e-4)')
parser.add_argument('--lr_decay', type=float, default=0.9996, help='Learning Rate Decay for SGD')
parser.add_argument('--lr_drop', type=int, default=20, help='Number of epochs required to decay learning rate')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--seed_a', type=int, default=None, metavar='S', help='random seed for model 0 (default: None)')
parser.add_argument('--seed_b', type=int, default=None, metavar='S', help='random seed for model 1 (default: None)')
parser.add_argument('--epochs_model',
type=int, default=200, metavar='EPOCHS', help='Number of epochs the models were trained for')
parser.add_argument('--alignment', type=str, default='',
help='specify an alignment if the models are to be aligned before curve finding (default: None)')
parser.add_argument('--val_freq', nargs='+', type=int, default=[20, 250],
help='the rate in epochs at which to evaluate the model on the validation set. (default: [20, 250])')
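# Example invocation (illustrative; the script filename below is a placeholder for
# wherever this file lives in the repository):
#   python train_curve_pam.py --model TinyTen --dataset CIFAR10 --seed_a 1 --seed_b 2 --alignment corr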
args = parser.parse_args()
args.dir = ('%s%s/%s/' % (args.dir, args.model, args.dataset))
args.dir2 = ('%s%s/%s/' % (args.dir2, args.model, args.dataset))
args.dir_models = ('%s%s/%s/' % (args.dir_models, args.model, args.dataset))
args.dir_alignment = ('%s%s/%s/' % (args.dir_alignment, args.model, args.dataset))
project_root = definitions.get_project_root()
os.chdir(project_root)
os.makedirs(args.dir, exist_ok=True)
os.makedirs(args.dir2, exist_ok=True)
print('Arguments')
for arg in vars(args):
print('%s: %s' % (arg, str(getattr(args, arg))))
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
np.random.seed(args.seed)
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
if use_cuda:
torch.cuda.manual_seed(args.seed)
loaders, num_classes = data.loaders(
args.dataset,
args.data_path,
args.batch_size,
args.num_workers,
args.transform,
args.use_test,
test_batch_size=512
)
model_paths = ['%scheckpoint_seed_%02d-%d.pt' % (args.dir_models, args.seed_a, args.epochs_model),
'%scheckpoint_seed_%02d-%d.pt' % (args.dir_models, args.seed_b, args.epochs_model)]
state_0 = torch.load(model_paths[0], map_location=device)
state_1 = torch.load(model_paths[1], map_location=device)
architecture = getattr(models, args.model)
model_0 = architecture.base(num_classes=num_classes, device=device, **architecture.kwargs)
model_0.load_state_dict(state_0['model_state'])
model_1 = architecture.base(num_classes=num_classes, device=device, **architecture.kwargs)
model_1.load_state_dict(state_1['model_state'])
if args.alignment is not None and args.alignment != '' and args.alignment != 'pam':
matching = np.load('%smatch_%s_seeds_%02d_%02d.npy' %
(args.dir_alignment, args.alignment, args.seed_a, args.seed_b), allow_pickle=True)
if args.model == 'ResNet32':
model_1, _ = alignment.align_models_resnet(model_1, matching)
elif args.model == 'GoogLeNet':
model_1.align_inception(matching)
else:
model_1 = alignment.align_models(model_1, matching)
model_1.to(device)
else:
matching = None
if args.model == 'GoogLeNet':
matching_ref = np.load('%smatch_%s_seeds_%02d_%02d.npy' %
(args.dir_alignment, 'corr', args.seed_a, args.seed_b), allow_pickle=True)
else:
matching_ref = None
curve = getattr(curves_pam, args.curve)
model = curves_pam.CurveNet(
num_classes,
device,
curve,
architecture.curve,
args.num_bends,
args.fix_start,
args.fix_end,
architecture_kwargs=architecture.kwargs,
act_ref=matching_ref
)
perm_params = nn.ParameterList()
for param in model.permutations.parameters():
if param.requires_grad:
perm_params.append(param)
optimizer_perm = optim.SGD(
perm_params,
lr=(args.lr * 5E-1))
optimizer_phi = optim.SGD(
filter(lambda param: param.requires_grad, model.curve_learnable_params),
lr=args.lr,
momentum=0.9,
weight_decay=args.wd if args.curve is None else 0.0,
nesterov=True)
lambda_perm = lambda epoch: 0.5 ** (epoch // 20) * args.lr_decay ** epoch
lambda_phi = lambda epoch: 0.5 ** (epoch // args.lr_drop) * args.lr_decay ** epoch
scheduler_perm = optim.lr_scheduler.LambdaLR(optimizer_perm, lr_lambda=lambda_perm)
scheduler_phi = optim.lr_scheduler.LambdaLR(optimizer_phi, lr_lambda=lambda_phi)
if args.resume is None:
model.import_base_parameters(model_0, 0)
model.import_base_parameters(model_1, 2)
if args.init_linear:
print('Linear initialization.')
model.init_zero()
start_epoch = 1
model.to(device)
model_turningpt = architecture.base(num_classes=num_classes, device=device)
model.export_base_parameters(model_turningpt, 1)
if args.model == 'GoogLeNet':
criterion = utils.googlenet_criterion
else:
criterion = nn.CrossEntropyLoss()
regularizer = None if args.curve is None else curves_pam.l2_regularizer(args.wd)
if args.val_freq is None:
args.val_freq = np.nan
total_iters = args.outer_iters * (args.inner_iters_perm + args.inner_iters_phi)
acc_train = np.ones(total_iters + 1) * np.nan
acc_test = np.ones(total_iters + 1) * np.nan
loss_train = np.ones(total_iters + 1) * np.nan
loss_test = np.ones(total_iters + 1) * np.nan
has_bn = utils.check_bn(model)
lr = args.lr
change_P = np.ones(total_iters + 1) * np.nan
number_batches = len(loaders['test'])
loss_time = np.ones([total_iters+1, number_batches]) * np.nan
acc_time = np.ones([total_iters+1, number_batches]) * np.nan
if args.val_freq[0] is None:
args.val_freq[0] = np.nan
if args.val_freq[1] is None:
args.val_freq[1] = np.nan
print('Beginning training')
for iter in range(start_epoch, args.outer_iters + 1):
params_before = [None] * len(optimizer_perm.param_groups[0]['params'])
for idx, param in enumerate(optimizer_perm.param_groups[0]['params']):
params_before[idx] = param.clone().detach()
for epoch in range(1, args.inner_iters_perm + 1):
for param in optimizer_perm.param_groups[0]['params']:
param.requires_grad = True
for param in optimizer_phi.param_groups[0]['params']:
param.requires_grad = False
test_res = {'loss': np.nan, 'accuracy': np.nan, 'nll': np.nan, 'loss_time': np.nan, 'acc_time': np.nan}
time_ep = time.time()
if args.curve is None or not has_bn or epoch % args.val_freq[0] == 1 or args.val_freq[0] == 1:
test_res = utils.test_perm(loaders['test'], model, criterion, regularizer=regularizer,
train_loader=loaders['train'], bn_eval=False, samp_t=True)
idx = scheduler_perm.last_epoch + scheduler_phi.last_epoch
loss_test[idx] = test_res['loss']
acc_test[idx] = test_res['accuracy']
loss_time[idx, :] = test_res['loss_time']
acc_time[idx, :] = test_res['acc_time']
np.set_printoptions(precision=2)
np.set_printoptions(suppress=True)
train_res = utils.train_perm(loaders['train'], model, optimizer_perm, scheduler_perm, criterion,
params_old=params_before, regularizer=None, nu=1E3, proj_flag=False,
pen_flag=True, lp_pen=None, tqdm_summary=False)
scheduler_perm.step()
time_ep = time.time() - time_ep
print('Outer Iteration %2d, Permutation Iteration %2d, Training Loss: %.3E, Training Accuracy: %.2f, '
'Validation Loss: %.3E, Validation Accuracy: %.2f, Time Elapsed: %.2fs' %
(iter, scheduler_perm.last_epoch, train_res['loss'], train_res['accuracy'], test_res['nll'],
test_res['accuracy'], time_ep))
idx = scheduler_perm.last_epoch + scheduler_phi.last_epoch
loss_train[idx] = train_res['loss']
acc_train[idx] = train_res['accuracy']
print('Doubly Stochastic Matrix', optimizer_perm.param_groups[0]['params'][0].data.cpu().detach().numpy())
utils.sample_permutation(model, optimizer_perm, loaders['train'], loaders['train'], criterion, params_before, k=32)
print('Permutation Sampled', optimizer_perm.param_groups[0]['params'][0].data.cpu().detach().numpy())
with torch.no_grad():
bb = []
for param, param_o in zip(optimizer_perm.param_groups[0]['params'], params_before):
bb.append(torch.sum(param * param_o).item())
print(bb)
params_before = [None] * len(optimizer_phi.param_groups[0]['params'])
for idx, param in enumerate(optimizer_phi.param_groups[0]['params']):
params_before[idx] = param.detach().clone()
for epoch in range(1, args.inner_iters_phi + 1):
for param in optimizer_perm.param_groups[0]['params']:
param.requires_grad = False
for param in optimizer_phi.param_groups[0]['params']:
param.requires_grad = True
test_res = {'loss': np.nan, 'accuracy': np.nan, 'nll': np.nan}
time_ep = time.time()
if args.curve is None or not has_bn or epoch % args.val_freq[1] == 1 or args.val_freq[1] == 1:
test_res = utils.test_perm(loaders['test'], model, criterion, regularizer=regularizer,
train_loader=loaders['train'], bn_eval=False, samp_t=True)
idx = scheduler_perm.last_epoch + scheduler_phi.last_epoch
loss_test[idx] = test_res['loss']
acc_test[idx] = test_res['accuracy']
loss_time[idx, :] = test_res['loss_time']
acc_time[idx, :] = test_res['acc_time']
train_res = utils.train_perm(loaders['train'], model, optimizer_phi, scheduler_phi, criterion,
params_old=params_before, regularizer=regularizer, nu=1E3)
scheduler_phi.step()
time_ep = time.time() - time_ep
print('Outer Iteration %2d, Curve Iteration %2d, Training Loss: %.3E, Training Accuracy: %.2f, '
'Validation Loss: %.3E, Validation Accuracy: %.2f, Time Elapsed: %.2fs' %
(iter, scheduler_phi.last_epoch, train_res['loss'], train_res['accuracy'], test_res['nll'],
test_res['accuracy'], time_ep))
idx = scheduler_perm.last_epoch + scheduler_phi.last_epoch
loss_train[idx] = train_res['loss']
acc_train[idx] = train_res['accuracy']
test_res = utils.test_perm(loaders['test'], model, criterion, regularizer, train_loader=loaders['train'], bn_eval=False,
samp_t=True)
loss_test[idx] = test_res['loss']
acc_test[idx] = test_res['accuracy']
loss_time[idx, :] = test_res['loss_time']
acc_time[idx, :] = test_res['acc_time']
if args.model == 'GoogLeNet':
pam_perm = []
for perm in model.permutations[1:-1]:
if len(perm) == 1:
pam_perm.append(perm[0].cpu().numpy())
else:
sub_list = []
for sub_perm in perm:
sub_list.append(sub_perm.cpu().numpy())
pam_perm.append(sub_list)
else:
pam_perm = [torch.nonzero(i)[:, 1].cpu().numpy() for i in model.permutations]
pam_perm = pam_perm[1:-1]
if matching is not None:
pam_perm = [match_og[match_perm] for (match_og, match_perm) in zip(matching, pam_perm)]
model.export_base_parameters(model_turningpt, 1)
model_turningpt_fin = copy.deepcopy(model_turningpt)
if args.model == 'GoogLeNet':
model.weight_permutation(model_1)
else:
model.weight_permutation()
model.init_linear()
model.export_base_parameters(model_turningpt, 1)
for param_0, param_1 in zip(model_turningpt_fin.parameters(), model_turningpt.parameters()):
param_0.data += param_1.data
utils.save_checkpoint(
args.dir,
total_iters,
name='checkpoint_align_pam_%s_seeds_%02d_%02d' % (args.alignment, args.seed_a, args.seed_b),
model_state=model_turningpt_fin.state_dict(),
optimizer_state_perm=optimizer_perm.state_dict(),
optimizer_state_phi=optimizer_phi.state_dict()
)
np.save('%smatch_pam_%s_seeds_%02d_%02d.npy' % (args.dir_alignment, args.alignment, args.seed_a, args.seed_b), pam_perm)
curve_data = {'acc_train': acc_train, 'loss_train': loss_train, 'acc_test': acc_test, 'loss_test': loss_test,
'iters_perm': args.inner_iters_perm, 'iters_phi': args.inner_iters_phi,
'loss_time': loss_time, 'acc_time': acc_time, 'change_perm': change_P}
np.save('%scurve_align_pam_%s_seeds_%02d_%02d.npy' % (args.dir2, args.alignment, args.seed_a, args.seed_b), curve_data)
| [
"torch.device",
"torch.nonzero",
"torch.nn.ParameterList",
"torch.cuda.manual_seed",
"torch.no_grad",
"torch.optim.SGD",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.CrossEntropyLoss",
"torch.sum"
] | 1.4.0 | IBM/NeuronAlignment | 5b82b60666db1fac72e53db07529a3328ee549c4 |
1.1 | import os
from argparse import Namespace
import numpy as np
import torch
# from pl_examples import LightningTemplateModel
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TestTubeLogger, TensorBoardLogger
from tests.base import LightningTestModel, EvalModelTemplate
from tests.base.datasets import PATH_DATASETS
# generate lists of random ports and random seeds for the tests
RANDOM_PORTS = list(np.random.randint(12000, 19000, 1000))
ROOT_SEED = 1234
torch.manual_seed(ROOT_SEED)
np.random.seed(ROOT_SEED)
RANDOM_SEEDS = list(np.random.randint(0, 10000, 1000))
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
def assert_speed_parity(pl_times, pt_times, num_epochs):
# assert speeds
max_diff_per_epoch = 0.9
pl_times = np.asarray(pl_times)
pt_times = np.asarray(pt_times)
diffs = pl_times - pt_times
diffs = diffs / num_epochs
assert np.alltrue(diffs < max_diff_per_epoch), \
f"lightning was slower than PT (threshold {max_diff_per_epoch})"
def run_model_test_no_loggers(trainer_options, model, min_acc=0.50):
# save_dir = trainer_options['default_root_dir']
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
# correct result and ok accuracy
assert result == 1, 'amp + ddp model failed to complete'
# test model loading
pretrained_model = load_model(trainer.logger,
trainer.checkpoint_callback.dirpath,
path_expt=trainer_options.get('default_root_dir'))
# test new model accuracy
test_loaders = model.test_dataloader()
if not isinstance(test_loaders, list):
test_loaders = [test_loaders]
for dataloader in test_loaders:
run_prediction(dataloader, pretrained_model, min_acc=min_acc)
if trainer.use_ddp:
# on hpc this would work fine... but need to hack it for the purpose of the test
trainer.model = pretrained_model
trainer.optimizers, trainer.lr_schedulers = pretrained_model.configure_optimizers()
def run_model_test(trainer_options, model, on_gpu=True):
save_dir = trainer_options['default_root_dir']
# logger file to get meta
logger = get_default_testtube_logger(save_dir, False)
# logger file to get weights
checkpoint = init_checkpoint_callback(logger)
# add these to the trainer options
trainer_options['checkpoint_callback'] = checkpoint
trainer_options['logger'] = logger
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
# correct result and ok accuracy
assert result == 1, 'amp + ddp model failed to complete'
# test model loading
pretrained_model = load_model(logger, trainer.checkpoint_callback.dirpath)
# test new model accuracy
test_loaders = model.test_dataloader()
if not isinstance(test_loaders, list):
test_loaders = [test_loaders]
[run_prediction(dataloader, pretrained_model) for dataloader in test_loaders]
if trainer.use_ddp or trainer.use_ddp2:
# on hpc this would work fine... but need to hack it for the purpose of the test
trainer.model = pretrained_model
trainer.optimizers, trainer.lr_schedulers, trainer.optimizer_frequencies = \
trainer.init_optimizers(pretrained_model)
# test HPC loading / saving
trainer.hpc_save(save_dir, logger)
trainer.hpc_load(save_dir, on_gpu=on_gpu)
def get_default_hparams(continue_training=False, hpc_exp_number=0):
_ = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
args = {
'drop_prob': 0.2,
'batch_size': 32,
'in_features': 28 * 28,
'learning_rate': 0.001 * 8,
'optimizer_name': 'adam',
'data_root': PATH_DATASETS,
'out_features': 10,
'hidden_dim': 1000,
}
if continue_training:
args['test_tube_do_checkpoint_load'] = True
args['hpc_exp_number'] = hpc_exp_number
hparams = Namespace(**args)
return hparams
def get_default_model(lbfgs=False):
# set up model with these hyperparams
hparams = get_default_hparams()
if lbfgs:
setattr(hparams, 'optimizer_name', 'lbfgs')
setattr(hparams, 'learning_rate', 0.002)
model = LightningTestModel(hparams)
return model, hparams
def get_default_testtube_logger(save_dir, debug=True, version=None):
# set up logger object without actually saving logs
logger = TestTubeLogger(save_dir, name='lightning_logs', debug=debug, version=version)
return logger
def get_data_path(expt_logger, path_dir=None):
# some calls contain only experiment not complete logger
expt = expt_logger.experiment if hasattr(expt_logger, 'experiment') else expt_logger
# each logger has to have these attributes
name, version = expt_logger.name, expt_logger.version
# only the test-tube experiment has such attribute
if hasattr(expt, 'get_data_path'):
return expt.get_data_path(name, version)
# the other experiments...
if not path_dir:
path_dir = ROOT_PATH
path_expt = os.path.join(path_dir, name, 'version_%s' % version)
# try if the new sub-folder exists, typical case for test-tube
if not os.path.isdir(path_expt):
path_expt = path_dir
return path_expt
def load_model(exp, root_weights_dir, module_class=LightningTestModel, path_expt=None):
# load trained model
path_expt_dir = get_data_path(exp, path_dir=path_expt)
tags_path = os.path.join(path_expt_dir, TensorBoardLogger.NAME_CSV_TAGS)
checkpoints = [x for x in os.listdir(root_weights_dir) if '.ckpt' in x]
weights_dir = os.path.join(root_weights_dir, checkpoints[0])
trained_model = module_class.load_from_checkpoint(
checkpoint_path=weights_dir,
tags_csv=tags_path
)
assert trained_model is not None, 'loading model failed'
return trained_model
def load_model_from_checkpoint(root_weights_dir, module_class=LightningTestModel):
# load trained model
checkpoints = [x for x in os.listdir(root_weights_dir) if '.ckpt' in x]
weights_dir = os.path.join(root_weights_dir, checkpoints[0])
trained_model = module_class.load_from_checkpoint(
checkpoint_path=weights_dir,
)
assert trained_model is not None, 'loading model failed'
return trained_model
def run_prediction(dataloader, trained_model, dp=False, min_acc=0.5):
# run prediction on 1 batch
for batch in dataloader:
break
x, y = batch
x = x.view(x.size(0), -1)
if dp:
output = trained_model(batch, 0)
acc = output['val_acc']
acc = torch.mean(acc).item()
else:
y_hat = trained_model(x)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
acc = torch.tensor(acc)
acc = acc.item()
assert acc >= min_acc, f"This model is expected to get > {min_acc} in test set (it got {acc})"
def assert_ok_model_acc(trainer, key='test_acc', thr=0.5):
# the model should exceed the given accuracy threshold (thr)
acc = trainer.training_tqdm_dict[key]
assert acc > thr, f"Model failed to get expected {thr} accuracy. {key} = {acc}"
def reset_seed():
seed = RANDOM_SEEDS.pop()
torch.manual_seed(seed)
np.random.seed(seed)
def set_random_master_port():
port = RANDOM_PORTS.pop()
os.environ['MASTER_PORT'] = str(port)
def init_checkpoint_callback(logger, path_dir=None):
exp_path = get_data_path(logger, path_dir=path_dir)
ckpt_dir = os.path.join(exp_path, 'checkpoints')
os.mkdir(ckpt_dir)
checkpoint = ModelCheckpoint(ckpt_dir)
return checkpoint
| [
"torch.manual_seed",
"torch.tensor",
"torch.mean",
"torch.argmax",
"torch.sum"
] | 1.1 | gangiman/pytorch-lightning | 9b31272cf0f3079a244944096b4a81eec20fe555 |
1.4 | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
It also includes common transformation functions (e.g., get_transform, __scale_width), which can later be used in subclasses.
"""
import random
import numpy as np
import torch.utils.data as data
import torch
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod
class BaseDataset(data.Dataset, ABC):
"""This class is an abstract base class (ABC) for datasets.
To create a subclass, you need to implement the following four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the class; save the options in the class
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
self.opt = opt
self.root = opt.dataroot
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def __len__(self):
"""Return the total number of images in the dataset."""
return 0
@abstractmethod
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns:
a dictionary of data with their names. It usually contains the data itself and its metadata information.
"""
pass
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.preprocess == 'resize_and_crop':
new_h = new_w = opt.load_size
elif opt.preprocess == 'scale_width_and_crop':
new_w = opt.load_size
new_h = opt.load_size * h // w
x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params=None, noise=False, grayscale=False, method=Image.BICUBIC):
transform_list = []
if grayscale:
transform_list.append(transforms.Grayscale(1))
if 'resize' in opt.preprocess:
osize = [opt.load_size, opt.load_size]
transform_list.append(transforms.Resize(osize, method))
if 'crop' in opt.preprocess:
if params is None:
transform_list.append(transforms.RandomCrop(opt.crop_size))
else:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
if opt.preprocess == 'none':
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
transform_list.append(transforms.ToTensor())
if noise:
transform_list.append(transforms.ColorJitter(brightness=(0.8, 1.2), contrast=(0.8, 1.2),
saturation=(0.8, 1.2), hue=(-0.05, 0.05)))
transform_list.append(transforms.Lambda(lambda x: coarse_dropout(x)))
transform_list.append(transforms.Lambda(lambda x: x + torch.rand(x.shape)*0.05))
if grayscale:
transform_list += [transforms.Normalize((0.5,), (0.5,))]
else:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
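# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Build the training transform from a minimal options object. Only the attribute
# names are taken from the functions above; the values are placeholders.
from argparse import Namespace as _ExampleNamespace
_example_opt = _ExampleNamespace(preprocess='resize_and_crop', load_size=286, crop_size=256)
_example_params = get_params(_example_opt, size=(512, 384))
_example_transform = get_transform(_example_opt, params=_example_params, noise=False, grayscale=False)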
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if h == oh and w == ow:
return img
__print_size_warning(ow, oh, w, h)
return img.resize((w, h), method)
def __scale_width(img, target_size, crop_size, method=Image.BICUBIC):
ow, oh = img.size
if ow == target_size and oh >= crop_size:
return img
w = target_size
h = int(max(target_size * oh / ow, crop_size))
return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def __print_size_warning(ow, oh, w, h):
"""Print warning information about image size(only print once)"""
if not hasattr(__print_size_warning, 'has_printed'):
print("The image size needs to be a multiple of 4. "
"The loaded image size was (%d, %d), so it was adjusted to "
"(%d, %d). This adjustment will be done to all images "
"whose sizes are not multiples of 4" % (ow, oh, w, h))
__print_size_warning.has_printed = True
def coarse_dropout(image, count=200, max_size=0.05):
dim = image.shape[1]
for k in range(count):
x = (torch.rand(1) * image.shape[1]).int()
y = (torch.rand(1) * image.shape[2]).int()
height = (dim * max_size * torch.rand(1))
ya = torch.max(torch.tensor(0), y - height // 2).int()
yb = torch.min(torch.tensor(dim), y + height // 2).int()
xa = torch.max(torch.tensor(0), x - height // 2).int()
xb = torch.min(torch.tensor(dim), x + height // 2).int()
one = image[:, ya:yb, 0:xa]
two = torch.rand([3, yb - ya, xb - xa]) * 1
three = image[:, ya:yb, xb:dim]
middle = torch.cat([one, two, three], axis=2)
image = torch.cat([image[:, 0:ya, :], middle, image[:, yb:dim, :]], axis=1)
image = torch.reshape(image, [3, dim, dim])
return image | [
"torch.reshape",
"torch.rand",
"torch.cat",
"torch.tensor"
] | 1.4.0 | loerssoni/pytorch-CycleGAN-and-pix2pix | 289287f9e4bd948a306627b32fc6b57b78420121 |
0.4 | """
LOAD DATA from file.
"""
# pylint: disable=C0301,E1101,W0622,C0103,R0902,R0915
##
import os
import torch
import numpy as np
import torchvision.datasets as datasets
from torchvision.datasets import MNIST
from torchvision.datasets import CIFAR10
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
##
def load_data(opt):
""" Load Data
Args:
opt ([type]): Argument Parser
Raises:
IOError: Cannot Load Dataset
Returns:
[type]: dataloader
"""
##
# LOAD DATA SET
if opt.dataroot == '':
opt.dataroot = './data/{}'.format(opt.dataset)
if opt.dataset in ['cifar10']:
splits = ['train', 'test']
drop_last_batch = {'train': True, 'test': False}
shuffle = {'train': True, 'test': False}
transform = transforms.Compose(
[
transforms.Resize(opt.isize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
)
classes = {
'plane': 0, 'car': 1, 'bird': 2, 'cat': 3, 'deer': 4,
'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9
}
dataset = {}
dataset['train'] = CIFAR10(root='./data', train=True, download=True, transform=transform)
dataset['test'] = CIFAR10(root='./data', train=False, download=True, transform=transform)
dataset['train'].train_data, dataset['train'].train_labels, \
dataset['test'].test_data, dataset['test'].test_labels = get_cifar_anomaly_dataset(
trn_img=dataset['train'].train_data,
trn_lbl=dataset['train'].train_labels,
tst_img=dataset['test'].test_data,
tst_lbl=dataset['test'].test_labels,
abn_cls_idx=classes[opt.anomaly_class]
)
dataloader = {x: torch.utils.data.DataLoader(dataset=dataset[x],
batch_size=opt.batchsize,
shuffle=shuffle[x],
num_workers=int(opt.workers),
drop_last=drop_last_batch[x]) for x in splits}
return dataloader
elif opt.dataset in ['mnist']:
opt.anomaly_class = int(opt.anomaly_class)
splits = ['train', 'test']
drop_last_batch = {'train': True, 'test': False}
shuffle = {'train': True, 'test': True}
transform = transforms.Compose(
[
transforms.Scale(opt.isize),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]
)
dataset = {}
dataset['train'] = MNIST(root='./data', train=True, download=True, transform=transform)
dataset['test'] = MNIST(root='./data', train=False, download=True, transform=transform)
dataset['train'].train_data, dataset['train'].train_labels, \
dataset['test'].test_data, dataset['test'].test_labels = get_mnist_anomaly_dataset(
trn_img=dataset['train'].train_data,
trn_lbl=dataset['train'].train_labels,
tst_img=dataset['test'].test_data,
tst_lbl=dataset['test'].test_labels,
abn_cls_idx=opt.anomaly_class
)
dataloader = {x: torch.utils.data.DataLoader(dataset=dataset[x],
batch_size=opt.batchsize,
shuffle=shuffle[x],
num_workers=int(opt.workers),
drop_last=drop_last_batch[x]) for x in splits}
return dataloader
elif opt.dataset in ['mnist2']:
opt.anomaly_class = int(opt.anomaly_class)
splits = ['train', 'test']
drop_last_batch = {'train': True, 'test': False}
shuffle = {'train': True, 'test': True}
transform = transforms.Compose(
[
transforms.Scale(opt.isize),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]
)
dataset = {}
dataset['train'] = MNIST(root='./data', train=True, download=True, transform=transform)
dataset['test'] = MNIST(root='./data', train=False, download=True, transform=transform)
dataset['train'].train_data, dataset['train'].train_labels, \
dataset['test'].test_data, dataset['test'].test_labels = get_mnist2_anomaly_dataset(
trn_img=dataset['train'].train_data,
trn_lbl=dataset['train'].train_labels,
tst_img=dataset['test'].test_data,
tst_lbl=dataset['test'].test_labels,
nrm_cls_idx=opt.anomaly_class,
proportion=opt.proportion
)
dataloader = {x: torch.utils.data.DataLoader(dataset=dataset[x],
batch_size=opt.batchsize,
shuffle=shuffle[x],
num_workers=int(opt.workers),
drop_last=drop_last_batch[x]) for x in splits}
return dataloader
else:
splits = ['train', 'test']
drop_last_batch = {'train': True, 'test': False}
shuffle = {'train': True, 'test': True}
transform = transforms.Compose([transforms.Scale(opt.isize),
transforms.CenterCrop(opt.isize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])
dataset = {x: ImageFolder(os.path.join(opt.dataroot, x), transform) for x in splits}
dataloader = {x: torch.utils.data.DataLoader(dataset=dataset[x],
batch_size=opt.batchsize,
shuffle=shuffle[x],
num_workers=int(opt.workers),
drop_last=drop_last_batch[x]) for x in splits}
return dataloader
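# Example usage (illustrative): given a parsed options namespace `opt` with the
# fields referenced above (dataset, dataroot, isize, batchsize, workers, anomaly_class, ...):
#   dataloader = load_data(opt)
#   train_loader, test_loader = dataloader['train'], dataloader['test']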
##
def get_cifar_anomaly_dataset(trn_img, trn_lbl, tst_img, tst_lbl, abn_cls_idx=0, manualseed=-1):
"""[summary]
Arguments:
trn_img {np.array} -- Training images
trn_lbl {np.array} -- Training labels
tst_img {np.array} -- Test images
tst_lbl {np.array} -- Test labels
Keyword Arguments:
abn_cls_idx {int} -- Anomalous class index (default: {0})
Returns:
[np.array] -- New training-test images and labels.
"""
# Convert train-test labels into numpy array.
trn_lbl = np.array(trn_lbl)
tst_lbl = np.array(tst_lbl)
# --
# Find idx, img, lbl for abnormal and normal on org dataset.
nrm_trn_idx = np.where(trn_lbl != abn_cls_idx)[0]
abn_trn_idx = np.where(trn_lbl == abn_cls_idx)[0]
nrm_trn_img = trn_img[nrm_trn_idx] # Normal training images
abn_trn_img = trn_img[abn_trn_idx] # Abnormal training images
nrm_trn_lbl = trn_lbl[nrm_trn_idx] # Normal training labels
abn_trn_lbl = trn_lbl[abn_trn_idx] # Abnormal training labels.
nrm_tst_idx = np.where(tst_lbl != abn_cls_idx)[0]
abn_tst_idx = np.where(tst_lbl == abn_cls_idx)[0]
nrm_tst_img = tst_img[nrm_tst_idx]    # Normal test images
abn_tst_img = tst_img[abn_tst_idx]    # Abnormal test images.
nrm_tst_lbl = tst_lbl[nrm_tst_idx]    # Normal test labels
abn_tst_lbl = tst_lbl[abn_tst_idx]    # Abnormal test labels.
# --
# Assign labels to normal (0) and abnormals (1)
nrm_trn_lbl[:] = 0
nrm_tst_lbl[:] = 0
abn_trn_lbl[:] = 1
abn_tst_lbl[:] = 1
# --
if manualseed != -1:
# Random seed.
# Concatenate the original train and test sets.
nrm_img = np.concatenate((nrm_trn_img, nrm_tst_img), axis=0)
nrm_lbl = np.concatenate((nrm_trn_lbl, nrm_tst_lbl), axis=0)
abn_img = np.concatenate((abn_trn_img, abn_tst_img), axis=0)
abn_lbl = np.concatenate((abn_trn_lbl, abn_tst_lbl), axis=0)
# Split the normal data into the new train and tests.
idx = np.arange(len(nrm_lbl))
np.random.seed(manualseed)
np.random.shuffle(idx)
nrm_trn_len = int(len(idx) * 0.80)
nrm_trn_idx = idx[:nrm_trn_len]
nrm_tst_idx = idx[nrm_trn_len:]
nrm_trn_img = nrm_img[nrm_trn_idx]
nrm_trn_lbl = nrm_lbl[nrm_trn_idx]
nrm_tst_img = nrm_img[nrm_tst_idx]
nrm_tst_lbl = nrm_lbl[nrm_tst_idx]
# Create new anomaly dataset based on the following data structure:
# - anomaly dataset
# . -> train
# . -> normal
# . -> test
# . -> normal
# . -> abnormal
new_trn_img = np.copy(nrm_trn_img)
new_trn_lbl = np.copy(nrm_trn_lbl)
new_tst_img = np.concatenate((nrm_tst_img, abn_trn_img, abn_tst_img), axis=0)
new_tst_lbl = np.concatenate((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), axis=0)
return new_trn_img, new_trn_lbl, new_tst_img, new_tst_lbl
##
def get_mnist_anomaly_dataset(trn_img, trn_lbl, tst_img, tst_lbl, abn_cls_idx=0, manualseed=-1):
"""[summary]
Arguments:
trn_img {np.array} -- Training images
trn_lbl {np.array} -- Training labels
tst_img {np.array} -- Test images
tst_lbl {np.array} -- Test labels
Keyword Arguments:
abn_cls_idx {int} -- Anomalous class index (default: {0})
Returns:
[tensor] -- New training-test images and labels.
"""
# --
# Find normal abnormal indexes.
nrm_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() != abn_cls_idx)[0])
abn_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() == abn_cls_idx)[0])
nrm_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() != abn_cls_idx)[0])
abn_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() == abn_cls_idx)[0])
# --
# Find normal and abnormal images
nrm_trn_img = trn_img[nrm_trn_idx] # Normal training images
abn_trn_img = trn_img[abn_trn_idx] # Abnormal training images.
nrm_tst_img = tst_img[nrm_tst_idx]    # Normal test images
abn_tst_img = tst_img[abn_tst_idx]    # Abnormal test images.
# --
# Find normal and abnormal labels.
nrm_trn_lbl = trn_lbl[nrm_trn_idx] # Normal training labels
abn_trn_lbl = trn_lbl[abn_trn_idx] # Abnormal training labels.
nrm_tst_lbl = tst_lbl[nrm_tst_idx]    # Normal test labels
abn_tst_lbl = tst_lbl[abn_tst_idx]    # Abnormal test labels.
# --
# Assign labels to normal (0) and abnormals (1)
nrm_trn_lbl[:] = 0
nrm_tst_lbl[:] = 0
abn_trn_lbl[:] = 1
abn_tst_lbl[:] = 1
# --
if manualseed != -1:
# Random seed.
# Concatenate the original train and test sets.
nrm_img = torch.cat((nrm_trn_img, nrm_tst_img), dim=0)
nrm_lbl = torch.cat((nrm_trn_lbl, nrm_tst_lbl), dim=0)
abn_img = torch.cat((abn_trn_img, abn_tst_img), dim=0)
abn_lbl = torch.cat((abn_trn_lbl, abn_tst_lbl), dim=0)
# Split the normal data into the new train and tests.
idx = np.arange(len(nrm_lbl))
np.random.seed(manualseed)
np.random.shuffle(idx)
nrm_trn_len = int(len(idx) * 0.80)
nrm_trn_idx = idx[:nrm_trn_len]
nrm_tst_idx = idx[nrm_trn_len:]
nrm_trn_img = nrm_img[nrm_trn_idx]
nrm_trn_lbl = nrm_lbl[nrm_trn_idx]
nrm_tst_img = nrm_img[nrm_tst_idx]
nrm_tst_lbl = nrm_lbl[nrm_tst_idx]
# Create new anomaly dataset based on the following data structure:
new_trn_img = nrm_trn_img.clone()
new_trn_lbl = nrm_trn_lbl.clone()
new_tst_img = torch.cat((nrm_tst_img, abn_trn_img, abn_tst_img), dim=0)
new_tst_lbl = torch.cat((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), dim=0)
return new_trn_img, new_trn_lbl, new_tst_img, new_tst_lbl
##
def get_mnist2_anomaly_dataset(trn_img, trn_lbl, tst_img, tst_lbl, nrm_cls_idx=0, proportion=0.5):
""" Create mnist 2 anomaly dataset.
Arguments:
trn_img {np.array} -- Training images
trn_lbl {np.array} -- Training labels
tst_img {np.array} -- Test images
tst_lbl {np.array} -- Test labels
Keyword Arguments:
nrm_cls_idx {int} -- Normal class index (default: {0})
Returns:
[tensor] -- New training-test images and labels.
"""
# --
# Find normal abnormal indexes.
# TODO: PyTorch v0.4 has torch.where function
nrm_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() == nrm_cls_idx)[0])
abn_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() != nrm_cls_idx)[0])
nrm_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() == nrm_cls_idx)[0])
abn_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() != nrm_cls_idx)[0])
# Get n percent of the abnormal samples.
abn_tst_idx = abn_tst_idx[torch.randperm(len(abn_tst_idx))]
abn_tst_idx = abn_tst_idx[:int(len(abn_tst_idx) * proportion)]
# --
# Find normal and abnormal images
nrm_trn_img = trn_img[nrm_trn_idx] # Normal training images
abn_trn_img = trn_img[abn_trn_idx] # Abnormal training images.
nrm_tst_img = tst_img[nrm_tst_idx]    # Normal test images
abn_tst_img = tst_img[abn_tst_idx]    # Abnormal test images.
# --
# Find normal and abnormal labels.
nrm_trn_lbl = trn_lbl[nrm_trn_idx] # Normal training labels
abn_trn_lbl = trn_lbl[abn_trn_idx] # Abnormal training labels.
nrm_tst_lbl = tst_lbl[nrm_tst_idx]    # Normal test labels
abn_tst_lbl = tst_lbl[abn_tst_idx]    # Abnormal test labels.
# --
# Assign labels to normal (0) and abnormals (1)
nrm_trn_lbl[:] = 0
nrm_tst_lbl[:] = 0
abn_trn_lbl[:] = 1
abn_tst_lbl[:] = 1
# Create new anomaly dataset based on the following data structure:
new_trn_img = nrm_trn_img.clone()
new_trn_lbl = nrm_trn_lbl.clone()
new_tst_img = torch.cat((nrm_tst_img, abn_tst_img), dim=0)
new_tst_lbl = torch.cat((nrm_tst_lbl, abn_tst_lbl), dim=0)
return new_trn_img, new_trn_lbl, new_tst_img, new_tst_lbl | [
"torch.cat"
] | 0.4.0 | lss616263/ganomaly | 0e8ddd7b97fdbe35b33d607cddaf62f36cb591c8 |
1.8 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import division, absolute_import, print_function, unicode_literals
from typing import Optional, Union
import math
import torch
from torch import nn
from model.encoder_internal import BasePrefixAwareLayer, BaseLogitAdjustableLayer
from model.hashembed import HashEmbedding
class HashCodeAwareLogits(BaseLogitAdjustableLayer):
def __init__(self, n_digits: int, n_ary_out: int,
num_embeddings: int, embedding_dim: int, num_buckets: int,
additive: bool,
logit_adjustment: bool,
matrix_rank_reduction: bool = False,
num_hashes=2,
replace_trailing_zeroes: bool = False,
**kwargs):
"""
Prefix-aware (=y_{<d}) logit layer implemented on top of Hash Embeddings.
@param n_digits: number of digits of sense code.
@param n_ary_out: number of ary of sense code.
@param num_embeddings: vocabulary size of Hash Embeddings.
@param embedding_dim: number of hidden dimensions.
@param num_buckets: number of unique shared embeddings of Hash Embeddings algorithm.
@param additive: average along with prefixes (=sense hierarchy).
@param logit_adjustment: adjust logit score using prefix-aware prior probability
@param matrix_rank_reduction: apply rank reduction to logit coefficient matrix. it doesn't work well so far.
@param num_hashes:
@param replace_trailing_zeroes: replace prefix index of zeroes with last non-zero indices.
@param kwargs:
"""
if logit_adjustment:
for required_argument in ("logit_adjust_tau", "logit_adjust_when"):
assert required_argument in kwargs, f"argument {required_argument} must be specified."
super().__init__(replace_trailing_zeroes=replace_trailing_zeroes, null_prefix_index=0,
num_classes=n_ary_out, unobserved_class_fill_strategy=kwargs.get("unobserved_class_fill_strategy", "min"),
smoothing_alpha=kwargs.get("smoothing_alpha", 0.1),
logit_adjust_when=kwargs["logit_adjust_when"],
logit_adjust_tau=kwargs["logit_adjust_tau"])
else:
super().__init__(replace_trailing_zeroes=replace_trailing_zeroes, null_prefix_index=0,
logit_adjust_when=False)
self._n_digits = n_digits
self._n_ary = n_ary_out
self._n_dim_emb = embedding_dim
self._n_distinc_prefix = num_embeddings
self._logit_adjustment = logit_adjustment
self._additive = additive
self._matrix_rank_reduction = matrix_rank_reduction
if matrix_rank_reduction:
self._matrix_rank = max(int(math.sqrt(n_ary_out)), n_ary_out // 8)
else:
self._matrix_rank = n_ary_out
# look up n_ary * n_dim_emb parameters from the prefix hash using HashEmbedding
if matrix_rank_reduction:
_embedding_dim = (n_ary_out + embedding_dim) * self._matrix_rank
else:
_embedding_dim = embedding_dim*n_ary_out
self._logit_layer_weights = HashEmbedding(num_embeddings=num_embeddings, num_hashes=num_hashes,
embedding_dim=_embedding_dim,
num_buckets=num_buckets, append_weight=False)
def forward(self, input_sequence: torch.Tensor, t_representation: torch.Tensor):
# input_sequence: (n_batch, n_digits_so_far) input_sequence[b,d] \in {0,n_ary_in}
# t_representation: (n_batch, n_digits_so_far, n_dim)
n_digits_so_far = min(self._n_digits, input_sequence.shape[-1])
# input_sequence_prefix_hashes: (n_batch, n_digits_so_far)
input_sequence_prefix_hashes = self.transform_sequence_to_prefix_indices(input_sequence)
# t_weight_: (n_batch, n_digits_so_far, n_ary_out * n_dim)
t_weight_ = self._logit_layer_weights.forward(input_sequence_prefix_hashes)
if self._additive:
# moving average from MSD to d-th digits.
t_weight_ = torch.cumsum(t_weight_, dim=1)
# dividing by the number of digits so far helps avoid NaN errors.
# t_denom: (1, n_digits_so_far, 1)
t_denom = torch.arange(start=1, end=n_digits_so_far+1, device=t_weight_.device).view(1, -1, 1)
t_weight_ = t_weight_ / t_denom
# t_weight: (n_batch, n_digits_so_far, n_ary_out, n_dim)
if self._matrix_rank_reduction:
# u: (n_batch, n_digits_so_far, n_ary_out, n_rank)
# v: (n_batch, n_digits_so_far, n_rank, n_dim)
t_weight_u, t_weight_v = torch.split(t_weight_, [self._n_ary*self._matrix_rank, self._n_dim_emb*self._matrix_rank], dim=-1)
t_weight_u = t_weight_u.view((-1, n_digits_so_far, self._n_ary, self._matrix_rank))
t_weight_v = t_weight_v.view((-1, n_digits_so_far, self._matrix_rank, self._n_dim_emb))
# t_logits_: (n_batch, n_digits_so_far, n_rank, 1)
t_logits_ = torch.matmul(t_weight_v, t_representation.unsqueeze(-1))
# t_logits: (n_batch, n_digits_so_far, n_ary_out)
t_logits = torch.matmul(t_weight_u, t_logits_).squeeze(-1)
else:
t_weight = t_weight_.view((-1, n_digits_so_far, self._n_ary, self._n_dim_emb))
# t_logits: (n_batch, n_digits_so_far, n_ary_out)
t_logits = torch.matmul(t_weight, t_representation.unsqueeze(-1)).squeeze(-1)
if self._logit_adjustment:
t_logits = super().apply_logit_adjustment(logits=t_logits, sequences=input_sequence)
return t_logits
def init_weights(self, *args, **kwargs):
self._logit_layer_weights.reset_parameters(std=0.00001)
def summary(self):
ret = super().summary()
ret["matrix_rank_reduction"] = self._matrix_rank_reduction
ret["matrix_rank"] = self._matrix_rank
ret["num_buckets"] = self._logit_layer_weights.num_buckets
ret["num_hashes"] = self._logit_layer_weights.num_hashes
ret["additive"] = self._additive
return ret
class AdditiveCodeAwareLogits(torch.nn.Module):
def __init__(self, n_digits: int, n_ary_in: int, n_ary_out: int, n_dim_emb: int,
bias: bool = False,
depends_on_previous_digits: Optional[int] = None,
**kwargs):
super().__init__()
self._n_digits = n_digits
self._n_ary_in = n_ary_in
self._n_ary_out = n_ary_out
self._n_dim_emb = n_dim_emb
self._bias = bias
self._depends_on_previous_digits = depends_on_previous_digits
cfg_base_weight_layer = {
"num_embeddings": n_ary_in,
"embedding_dim": n_ary_out * n_dim_emb
}
cfg_base_weight_layer.update(kwargs)
# base_weight_layers: (n_digit, n_ary_in, n_ary_out * n_dim)
lst_base_weight_layers = [nn.Embedding(**cfg_base_weight_layer) for _ in range(n_digits)]
self.base_weight_layers = nn.ModuleList(lst_base_weight_layers)
# offset_weights: (n_digit, n_ary_out * n_dim)
if bias:
self.bias_weights = nn.Parameter(torch.zeros(size=(n_digits, n_ary_out * n_dim_emb)), requires_grad=True)
self.init_weights()
def init_weights(self):
for layer in self.base_weight_layers:
nn.init.zeros_(layer.weight)
def _ragged_cumsum(self, tensor: torch.Tensor, dim: int, stride: Optional[int]):
if stride is None:
# t_cumsum[:,d,:] = tensor[:,:d+1,:].sum(dim=dim)
t_cumsum = torch.cumsum(tensor, dim=dim)
else:
# t_cumsum[:,d,:] = tensor[:,(d-stride):d+1,:].sum(dim=dim)
shp = list(tensor.shape)
length = shp[dim]
_stride = min(stride + 1, length)
t_ = torch.cumsum(tensor, dim=dim)
shp[dim] = _stride
pad = torch.zeros(shp, dtype=tensor.dtype).to(tensor.device)
index = torch.arange(end=length - _stride).to(tensor.device)
t_ragged = torch.index_select(t_, dim=dim, index=index)
t_cumsum = t_ - torch.cat((pad, t_ragged), dim=dim)
return t_cumsum
def forward(self, input_sequence: torch.Tensor, t_representation: torch.Tensor):
# input_sequence: (n_batch, n_digits_so_far) input_sequence[b,d] \in {0,n_ary_in}
# t_representation: (n_batch, n_digits_so_far, n_dim)
device = input_sequence.device
n_digits_so_far = min(self._n_digits, input_sequence.shape[-1])
lst_base_weights = [self.base_weight_layers[digit](input_sequence[:,digit]) for digit in range(n_digits_so_far)]
# t_base_weight: (n_batch, n_digits_so_far, n_ary_out * n_dim)
t_base_weight = torch.stack(lst_base_weights, dim=1)
if self._depends_on_previous_digits is None:
t_weight_ = torch.cumsum(t_base_weight, dim=1)
# dividing by the number of digits so far helps avoid NaN errors.
# t_denom: (1, n_digits_so_far, 1)
t_denom = torch.arange(start=1, end=n_digits_so_far+1, device=device).view(1, -1, 1)
t_weight_ = t_weight_ / t_denom
else:
t_weight_ = self._ragged_cumsum(t_base_weight, dim=1, stride=min(self._depends_on_previous_digits, n_digits_so_far))
if self._bias:
t_weight_ = t_weight_ + self.bias_weights[:n_digits_so_far, :]
# t_weight: (n_batch, n_digits_so_far, n_ary_out, n_dim)
t_weight = t_weight_.view((-1, n_digits_so_far, self._n_ary_out, self._n_dim_emb))
# t_logits: (n_batch, n_digits_so_far, n_ary_out)
t_logits = torch.matmul(t_weight, t_representation.unsqueeze(-1)).squeeze(-1)
return t_logits
def summary(self):
ret = {}
for attr_name in ("bias", "depends_on_previous_digits", "n_ary_in", "n_ary_out"):
ret[attr_name] = getattr(self, f"_{attr_name}")
return ret
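# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# AdditiveCodeAwareLogits maps a partial code of shape (n_batch, n_digits_so_far) plus
# per-digit representations of shape (n_batch, n_digits_so_far, n_dim) to per-digit
# logits over the output vocabulary; all sizes below are made-up example values.
_example_layer = AdditiveCodeAwareLogits(n_digits=4, n_ary_in=8, n_ary_out=8, n_dim_emb=16)
_example_codes = torch.randint(low=0, high=8, size=(2, 3))
_example_repr = torch.randn(2, 3, 16)
_example_logits = _example_layer(_example_codes, _example_repr)  # shape: (2, 3, 8)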
class PositionAwareLogits(torch.nn.Module):
def __init__(self, n_seq_len: int = None, **kwargs):
super().__init__()
if isinstance(n_seq_len, int):
lst_layers = [nn.Linear(**kwargs) for _ in range(n_seq_len)]
self.linear_layers = nn.ModuleList(lst_layers)
else:
self.linear_layers = nn.Linear(**kwargs)
self.n_seq_len = n_seq_len
self.init_weights()
def init_weights(self):
if isinstance(self.linear_layers, nn.ModuleList):
for layer in self.linear_layers:
nn.init.zeros_(layer.weight)
else:
nn.init.zeros_(self.linear_layers.weight)
def forward(self, t_representation: torch.Tensor, **kwargs) -> torch.Tensor:
# t_representation: (n_batch, n_digits_so_far, n_dim)
assert t_representation.ndim == 3, f"unexpected dimension size: {t_representation.ndim}"
if isinstance(self.linear_layers, nn.ModuleList):
n_digits = t_representation.shape[1]
lst_t_logits = [self.linear_layers[digit](t_representation[:,digit,:]) for digit in range(n_digits)]
t_logits = torch.stack(lst_t_logits, dim=1)
else:
t_logits = self.linear_layers.forward(t_representation)
# t_logits: (n_batch, n_digits_so_far, n_ary)
return t_logits
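# Illustrative shape check (editor's sketch, not part of the original code):
#   layer = PositionAwareLogits(n_seq_len=4, in_features=16, out_features=32)
#   t_repr = torch.randn(2, 4, 16)    # (n_batch, n_digits_so_far, n_dim)
#   layer(t_repr).shape               # -> torch.Size([2, 4, 32])
# With n_seq_len=None a single nn.Linear is shared across all positions.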
def summary(self):
ret = {
"n_seq_len": self.n_seq_len
}
return ret | [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.nn.ModuleList",
"torch.arange",
"torch.split",
"torch.index_select",
"torch.nn.init.zeros_",
"torch.matmul",
"torch.nn.Embedding",
"torch.cumsum"
] | 1.8.1 | s-mizuki-nlp/word_sense_disambiguation | e920b37f879e8db8f2b2d7a55e7997bb8b61ff1a |
1.3 | import torch
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from collections import OrderedDict
from maml.utils import update_parameters, tensors_to_device, compute_accuracy
__all__ = ['ModelAgnosticMetaLearning', 'MAML', 'FOMAML']
class ModelAgnosticMetaLearning(object):
"""Meta-learner class for Model-Agnostic Meta-Learning [1].
Parameters
----------
model : `torchmeta.modules.MetaModule` instance
The model.
optimizer : `torch.optim.Optimizer` instance, optional
The optimizer for the outer-loop optimization procedure. This argument
is optional for evaluation.
step_size : float (default: 0.1)
The step size of the gradient descent update for fast adaptation
(inner-loop update).
first_order : bool (default: False)
If `True`, then the first-order approximation of MAML is used.
learn_step_size : bool (default: False)
If `True`, then the step size is a learnable (meta-trained) additional
argument [2].
per_param_step_size : bool (default: False)
If `True`, then the step size parameter is different for each parameter
of the model. Has no impact unless `learn_step_size=True`.
num_adaptation_steps : int (default: 1)
The number of gradient descent updates on the loss function (over the
training dataset) to be used for the fast adaptation on a new task.
scheduler : object in `torch.optim.lr_scheduler`, optional
Scheduler for the outer-loop optimization [3].
loss_function : callable (default: `torch.nn.functional.cross_entropy`)
The loss function for both the inner and outer-loop optimization.
Usually `torch.nn.functional.cross_entropy` for a classification
problem, or `torch.nn.functional.mse_loss` for a regression problem.
device : `torch.device` instance, optional
The device on which the model is defined.
References
----------
.. [1] Finn C., Abbeel P., and Levine, S. (2017). Model-Agnostic Meta-Learning
for Fast Adaptation of Deep Networks. International Conference on
Machine Learning (ICML) (https://arxiv.org/abs/1703.03400)
.. [2] Li Z., Zhou F., Chen F., Li H. (2017). Meta-SGD: Learning to Learn
Quickly for Few-Shot Learning. (https://arxiv.org/abs/1707.09835)
.. [3] Antoniou A., Edwards H., Storkey A. (2018). How to train your MAML.
International Conference on Learning Representations (ICLR).
(https://arxiv.org/abs/1810.09502)
"""
def __init__(self, model, optimizer=None, step_size=0.1, first_order=False,
learn_step_size=False, per_param_step_size=False,
num_adaptation_steps=1, scheduler=None,
loss_function=F.cross_entropy, device=None):
self.model = model.to(device=device)
self.optimizer = optimizer
self.step_size = step_size
self.first_order = first_order
self.num_adaptation_steps = num_adaptation_steps
self.scheduler = scheduler
self.loss_function = loss_function
self.device = device
if per_param_step_size:
self.step_size = OrderedDict((name, torch.tensor(step_size,
dtype=param.dtype, device=self.device,
requires_grad=learn_step_size)) for (name, param)
in model.meta_named_parameters())
else:
self.step_size = torch.tensor(step_size, dtype=torch.float32,
device=self.device, requires_grad=learn_step_size)
if (self.optimizer is not None) and learn_step_size:
self.optimizer.add_param_group({'params': self.step_size.values()
if per_param_step_size else [self.step_size]})
if scheduler is not None:
for group in self.optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
self.scheduler.base_lrs = [group['initial_lr'] for group in self.optimizer.param_groups]
def get_outer_loss(self, batch):
if 'test' not in batch:
raise RuntimeError('The batch does not contain any test dataset.')
_, test_targets = batch['test']
num_tasks = test_targets.size(0)
is_classification_task = (not test_targets.dtype.is_floating_point)
results = {
'num_tasks': num_tasks,
'inner_losses': np.zeros((self.num_adaptation_steps,
num_tasks), dtype=np.float32),
'outer_losses': np.zeros((num_tasks,), dtype=np.float32),
'mean_outer_loss': 0.
}
if is_classification_task:
results.update({
'accuracies_before': np.zeros((num_tasks,), dtype=np.float32),
'accuracies_after': np.zeros((num_tasks,), dtype=np.float32)
})
mean_outer_loss = torch.tensor(0., device=self.device)
for task_id, (train_inputs, train_targets, test_inputs, test_targets) \
in enumerate(zip(*batch['train'], *batch['test'])):
params, adaptation_results = self.adapt(train_inputs, train_targets,
is_classification_task=is_classification_task,
num_adaptation_steps=self.num_adaptation_steps,
step_size=self.step_size, first_order=self.first_order)
results['inner_losses'][:, task_id] = adaptation_results['inner_losses']
if is_classification_task:
results['accuracies_before'][task_id] = adaptation_results['accuracy_before']
with torch.set_grad_enabled(self.model.training):
test_logits = self.model(test_inputs, params=params)
outer_loss = self.loss_function(test_logits, test_targets)
results['outer_losses'][task_id] = outer_loss.item()
mean_outer_loss += outer_loss
if is_classification_task:
results['accuracies_after'][task_id] = compute_accuracy(
test_logits, test_targets)
mean_outer_loss.div_(num_tasks)
results['mean_outer_loss'] = mean_outer_loss.item()
return mean_outer_loss, results
def adapt(self, inputs, targets, is_classification_task=None,
num_adaptation_steps=1, step_size=0.1, first_order=False):
if is_classification_task is None:
is_classification_task = (not targets.dtype.is_floating_point)
params = None
results = {'inner_losses': np.zeros(
(num_adaptation_steps,), dtype=np.float32)}
for step in range(num_adaptation_steps):
logits = self.model(inputs, params=params)
inner_loss = self.loss_function(logits, targets)
results['inner_losses'][step] = inner_loss.item()
if (step == 0) and is_classification_task:
results['accuracy_before'] = compute_accuracy(logits, targets)
self.model.zero_grad()
params = update_parameters(self.model, inner_loss,
step_size=step_size, params=params,
first_order=(not self.model.training) or first_order)
return params, results
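# Illustrative inner-loop sketch (editor's addition; assumes a torchmeta
# MetaModule `model` and placeholder support/query tensors for one task):
#   learner = ModelAgnosticMetaLearning(model, optimizer=None)
#   params, info = learner.adapt(support_inputs, support_targets,
#                                num_adaptation_steps=1, step_size=0.4)
#   query_logits = model(query_inputs, params=params)   # adapted fast weights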
def train(self, dataloader, max_batches=500, verbose=True, **kwargs):
with tqdm(total=max_batches, disable=not verbose, **kwargs) as pbar:
for results in self.train_iter(dataloader, max_batches=max_batches):
pbar.update(1)
postfix = {'loss': '{0:.4f}'.format(results['mean_outer_loss'])}
if 'accuracies_after' in results:
postfix['accuracy'] = '{0:.4f}'.format(
np.mean(results['accuracies_after']))
pbar.set_postfix(**postfix)
def train_iter(self, dataloader, max_batches=500):
if self.optimizer is None:
raise RuntimeError('Trying to call `train_iter`, while the '
'optimizer is `None`. In order to train `{0}`, you must '
'specify a PyTorch optimizer as the argument of `{0}` '
'(e.g. `{0}(model, optimizer=torch.optim.SGD(model.'
'parameters(), lr=0.01), ...)`).'.format(__class__.__name__))
num_batches = 0
self.model.train()
while num_batches < max_batches:
for batch in dataloader:
if num_batches >= max_batches:
break
if self.scheduler is not None:
self.scheduler.step(epoch=num_batches)
self.optimizer.zero_grad()
batch = tensors_to_device(batch, device=self.device)
outer_loss, results = self.get_outer_loss(batch)
yield results
outer_loss.backward()
self.optimizer.step()
num_batches += 1
def evaluate(self, dataloader, max_batches=500, verbose=True, **kwargs):
mean_outer_loss, mean_accuracy, count = 0., 0., 0
with tqdm(total=max_batches, disable=not verbose, **kwargs) as pbar:
for results in self.evaluate_iter(dataloader, max_batches=max_batches):
pbar.update(1)
count += 1
mean_outer_loss += (results['mean_outer_loss']
- mean_outer_loss) / count
postfix = {'loss': '{0:.4f}'.format(mean_outer_loss)}
if 'accuracies_after' in results:
mean_accuracy += (np.mean(results['accuracies_after'])
- mean_accuracy) / count
postfix['accuracy'] = '{0:.4f}'.format(mean_accuracy)
pbar.set_postfix(**postfix)
mean_results = {'mean_outer_loss': mean_outer_loss}
if 'accuracies_after' in results:
mean_results['accuracies_after'] = mean_accuracy
return mean_results
def evaluate_iter(self, dataloader, max_batches=500):
num_batches = 0
self.model.eval()
while num_batches < max_batches:
for batch in dataloader:
if num_batches >= max_batches:
break
batch = tensors_to_device(batch, device=self.device)
_, results = self.get_outer_loss(batch)
yield results
num_batches += 1
MAML = ModelAgnosticMetaLearning
class FOMAML(ModelAgnosticMetaLearning):
def __init__(self, model, optimizer=None, step_size=0.1,
learn_step_size=False, per_param_step_size=False,
num_adaptation_steps=1, scheduler=None,
loss_function=F.cross_entropy, device=None):
super(FOMAML, self).__init__(model, optimizer=optimizer, first_order=True,
step_size=step_size, learn_step_size=learn_step_size,
per_param_step_size=per_param_step_size,
num_adaptation_steps=num_adaptation_steps, scheduler=scheduler,
loss_function=loss_function, device=device)
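# Minimal end-to-end sketch (editor's addition; `dataloader` is assumed to be a
# torchmeta BatchMetaDataLoader yielding dicts with 'train' and 'test' splits):
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   metalearner = MAML(model, optimizer=optimizer, step_size=0.4,
#                      first_order=False, num_adaptation_steps=1)
#   metalearner.train(dataloader, max_batches=100, verbose=True)
#   results = metalearner.evaluate(dataloader, max_batches=50)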
| [
"torch.tensor",
"torch.set_grad_enabled"
] | 1.3.0 | ferhah/pytorch-maml | 8fbf8b200c1c73aeb98787e4df43036955dca323 |
1.0 | # -*- coding: utf-8 -*-
from functools import partial
import torch
from torchtext.data import Field, RawField
from onmt.inputters.dataset_base import DatasetBase
class TextDataset(DatasetBase):
"""
Build `Example` objects, `Field` objects, and filter_pred function
from text corpus.
Args:
fields (dict): a dictionary of `torchtext.data.Field`.
Keys are like 'src', 'tgt', 'src_map', and 'alignment'.
src_examples_iter (dict iter): preprocessed source example
dictionary iterator.
tgt_examples_iter (dict iter): preprocessed target example
dictionary iterator.
dynamic_dict (bool)
"""
@staticmethod
def sort_key(ex):
if hasattr(ex, "tgt"):
if hasattr(ex, "ans"):
return len(ex.src[0]), len(ex.ans[0]), len(ex.tgt[0])
else:
return len(ex.src[0]), len(ex.tgt[0])
else:
if hasattr(ex, "ans"):
return len(ex.src[0]), len(ex.ans[0])
else:
return len(ex.src[0])
@classmethod
def make_examples(cls, sequences, side):
"""
Args:
sequences: path to corpus file or iterable
side (str): "src" or "tgt" or "ans".
Yields:
dictionaries whose keys are the names of fields and whose
values are more or less the result of tokenizing with those
fields.
"""
if isinstance(sequences, str):
sequences = cls._read_file(sequences)
for i, seq in enumerate(sequences):
yield {side: seq, "indices": i}
# This helper is bound with functools.partial in text_fields() below.
def _feature_tokenize(
string, layer=0, tok_delim=None, feat_delim=None, truncate=None):
tokens = string.split(tok_delim)
if truncate is not None:
tokens = tokens[:truncate]
if feat_delim is not None:
tokens = [t.split(feat_delim)[layer] for t in tokens]
return tokens
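# Worked example (editor's sketch): with the feature delimiter used in
# text_fields() below, layer 0 yields surface tokens and layer 1 the first feature:
#   _feature_tokenize(u"the│DT dog│NN", layer=0, feat_delim=u"│")   # ['the', 'dog']
#   _feature_tokenize(u"the│DT dog│NN", layer=1, feat_delim=u"│")   # ['DT', 'NN']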
class TextMultiField(RawField):
def __init__(self, base_name, base_field, feats_fields):
super(TextMultiField, self).__init__()
self.fields = [(base_name, base_field)]
for name, ff in sorted(feats_fields, key=lambda kv: kv[0]):
self.fields.append((name, ff))
@property
def base_field(self):
return self.fields[0][1]
def process(self, batch, device=None):
# batch (list(list(list))): batch_size x len(self.fields) x seq_len
batch_by_feat = list(zip(*batch))
base_data = self.base_field.process(batch_by_feat[0], device=device)
if self.base_field.include_lengths:
# lengths: batch_size
base_data, lengths = base_data
feats = [ff.process(batch_by_feat[i], device=device)
for i, (_, ff) in enumerate(self.fields[1:], 1)]
levels = [base_data] + feats
# data: seq_len x batch_size x len(self.fields)
data = torch.stack(levels, 2)
if self.base_field.include_lengths:
return data, lengths
else:
return data
def preprocess(self, x):
return [f.preprocess(x) for _, f in self.fields]
def __getitem__(self, item):
return self.fields[item]
def text_fields(base_name, **kwargs):
"""Create text fields.
Args:
base_name (str)
n_feats (int)
include_lengths (bool)
pad (str, optional): Defaults to <blank>.
bos (str or NoneType, optional): Defaults to <s>
eos (str or NoneType, optional): Defaults to </s>
truncate (bool or NoneType, optional): Defaults to None.
"""
n_feats = kwargs["n_feats"]
include_lengths = kwargs["include_lengths"]
pad = kwargs.get("pad", "<blank>")
bos = kwargs.get("bos", "<s>")
eos = kwargs.get("eos", "</s>")
truncate = kwargs.get("truncate", None)
fields_ = []
feat_delim = u"│" if n_feats > 0 else None
for i in range(n_feats + 1):
name = base_name + "_feat_" + str(i - 1) if i > 0 else base_name
tokenize = partial(
_feature_tokenize,
layer=i,
truncate=truncate,
feat_delim=feat_delim)
use_len = i == 0 and include_lengths
feat = Field(
init_token=bos, eos_token=eos,
pad_token=pad, tokenize=tokenize,
include_lengths=use_len)
fields_.append((name, feat))
assert fields_[0][0] == base_name # sanity check
field = TextMultiField(fields_[0][0], fields_[0][1], fields_[1:])
return [(base_name, field)]
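# Illustrative usage (editor's sketch, not part of the original module):
#   src_fields = text_fields("src", n_feats=0, include_lengths=True)
#   tgt_fields = text_fields("tgt", n_feats=0, include_lengths=False,
#                            bos="<s>", eos="</s>")
# Each call returns [(base_name, TextMultiField)], ready to be merged into the
# fields dict consumed by TextDataset.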
| [
"torch.stack"
] | 1.0 | kolk/qa_factoid2natural | ccdd0096217c8e88b148f353f0c89628b85f9c4d |
1.8 | # Created by Packetsss
# Personal use is allowed
# Commercial use is prohibited
from __future__ import division
import math
import torch
import torch.optim as optim
from collections import defaultdict
class SharedRMSprop(optim.Optimizer):
"""Implements RMSprop algorithm with shared states.
"""
def __init__(self,
params,
lr=7e-4,
alpha=0.99,
eps=0.1,
weight_decay=0,
momentum=0,
centered=False):
defaults = defaultdict(lr=lr, alpha=alpha, eps=eps,
weight_decay=weight_decay, momentum=momentum, centered=centered)
super(SharedRMSprop, self).__init__(params, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = torch.zeros(1)
state['grad_avg'] = p.data.new().resize_as_(p.data).zero_()
state['square_avg'] = p.data.new().resize_as_(p.data).zero_()
state['momentum_buffer'] = p.data.new(
).resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['square_avg'].share_memory_()
state['step'].share_memory_()
state['grad_avg'].share_memory_()
state['momentum_buffer'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'RMSprop does not support sparse gradients')
state = self.state[p]
square_avg = state['square_avg']
alpha = group['alpha']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
square_avg.mul_(alpha).addcmul_(1 - alpha, grad, grad)
if group['centered']:
grad_avg = state['grad_avg']
grad_avg.mul_(alpha).add_(1 - alpha, grad)
avg = square_avg.addcmul(
-1, grad_avg, grad_avg).sqrt().add_(group['eps'])
else:
avg = square_avg.sqrt().add_(group['eps'])
if group['momentum'] > 0:
buf = state['momentum_buffer']
buf.mul_(group['momentum']).addcdiv_(grad, avg)
p.data.add_(-group['lr'], buf)
else:
p.data.addcdiv_(-group['lr'], grad, avg)
return loss
class SharedAdam(optim.Optimizer):
"""Implements Adam algorithm with shared states.
"""
def __init__(self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-3,
weight_decay=0, amsgrad=True):
defaults = defaultdict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(SharedAdam, self).__init__(params, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = torch.zeros(1)
state['exp_avg'] = p.data.new().resize_as_(p.data).zero_()
state['exp_avg_sq'] = p.data.new().resize_as_(p.data).zero_()
state['max_exp_avg_sq'] = p.data.new(
).resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'].share_memory_()
state['exp_avg'].share_memory_()
state['exp_avg_sq'].share_memory_()
state['max_exp_avg_sq'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
if group['weight_decay'] != 0:
grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till
# now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1**state['step'].item()
bias_correction2 = 1 - beta2**state['step'].item()
step_size = group['lr'] * \
math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
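# Illustrative usage sketch (editor's addition) for asynchronous training where
# several worker processes share one optimizer state (A3C-style); `MyPolicy` is
# a placeholder model class:
#   shared_model = MyPolicy()
#   shared_model.share_memory()
#   optimizer = SharedAdam(shared_model.parameters(), lr=1e-3)
#   optimizer.share_memory()   # move step/exp_avg buffers into shared memory
#   # each worker copies its gradients onto shared_model and calls optimizer.step()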
| [
"torch.zeros",
"torch.max"
] | 1.8.1 | PaulPan00/donkey_wrapper | a03cf0f42f65625fbce792b06c98acd153c5d6c8 |
1.4 | # -*- coding: utf-8 -*-
import math
from typing import Optional, Tuple
import warnings
import torch
from torch import Tensor
__all__ = [
"spectrogram",
"griffinlim",
"amplitude_to_DB",
"DB_to_amplitude",
"compute_deltas",
"compute_kaldi_pitch",
"create_fb_matrix",
"create_dct",
"detect_pitch_frequency",
"mu_law_encoding",
"mu_law_decoding",
"complex_norm",
"angle",
"magphase",
"phase_vocoder",
"mask_along_axis",
"mask_along_axis_iid",
"sliding_window_cmn",
"spectral_centroid",
]
def spectrogram(
waveform: Tensor,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: Optional[float],
normalized: bool,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True
) -> Tensor:
r"""Create a spectrogram or a batch of spectrograms from a raw audio signal.
The spectrogram can be either magnitude-only or complex.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
power (float or None): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead.
normalized (bool): Whether to normalize by magnitude after stft
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. Default: ``True``
Returns:
Tensor: Dimension (..., freq, time), freq is
``n_fft // 2 + 1`` and ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
if pad > 0:
# TODO add "with torch.no_grad():" back when JIT supports it
waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
# default values are consistent with librosa.core.spectrum._spectrogram
spec_f = torch.stft(
input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=False,
onesided=onesided,
return_complex=True,
)
# unpack batch
spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])
if normalized:
spec_f /= window.pow(2.).sum().sqrt()
if power is not None:
if power == 1.0:
return spec_f.abs()
return spec_f.abs().pow(power)
return torch.view_as_real(spec_f)
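# Illustrative usage (editor's sketch, not part of the original module):
#   waveform = torch.randn(1, 16000)    # one second of mono audio at 16 kHz
#   n_fft = 400
#   window = torch.hann_window(n_fft)
#   spec = spectrogram(waveform, pad=0, window=window, n_fft=n_fft,
#                      hop_length=n_fft // 2, win_length=n_fft,
#                      power=2.0, normalized=False)
#   spec.shape                          # -> (1, 201, 81): (..., freq, time)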
def griffinlim(
specgram: Tensor,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: float,
normalized: bool,
n_iter: int,
momentum: float,
length: Optional[int],
rand_init: bool
) -> Tensor:
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
Implementation ported from `librosa`.
* [1] McFee, Brian, Colin Raffel, Dawen Liang, Daniel PW Ellis, Matt McVicar, Eric Battenberg, and Oriol Nieto.
"librosa: Audio and music signal analysis in python."
In Proceedings of the 14th python in science conference, pp. 18-25. 2015.
* [2] Perraudin, N., Balazs, P., & Søndergaard, P. L.
"A fast Griffin-Lim algorithm,"
IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4),
Oct. 2013.
* [3] D. W. Griffin and J. S. Lim,
"Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984.
Args:
specgram (Tensor): A magnitude-only STFT spectrogram of dimension (..., freq, frames)
where freq is ``n_fft // 2 + 1``.
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins
hop_length (int): Length of hop between STFT windows. (
Default: ``win_length // 2``)
win_length (int): Window size. (Default: ``n_fft``)
power (float): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
normalized (bool): Whether to normalize by magnitude after stft.
n_iter (int): Number of iteration for phase recovery process.
momentum (float): The momentum parameter for fast Griffin-Lim.
Setting this to 0 recovers the original Griffin-Lim method.
Values near 1 can lead to faster convergence, but above 1 may not converge.
length (int or None): Array length of the expected output.
rand_init (bool): Initializes phase randomly if True, to zero otherwise.
Returns:
torch.Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
"""
assert momentum < 1, 'momentum={} >= 1 can be unstable'.format(momentum)
assert momentum >= 0, 'momentum={} < 0'.format(momentum)
if normalized:
warnings.warn(
"The argument normalized is not used in Griffin-Lim, "
"and will be removed in v0.9.0 release. To suppress this warning, "
"please use `normalized=False`.")
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
specgram = specgram.pow(1 / power)
# randomly initialize the phase
batch, freq, frames = specgram.size()
if rand_init:
angles = 2 * math.pi * torch.rand(batch, freq, frames)
else:
angles = torch.zeros(batch, freq, frames)
angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \
.to(dtype=specgram.dtype, device=specgram.device)
specgram = specgram.unsqueeze(-1).expand_as(angles)
# And initialize the previous iterate to 0
rebuilt = torch.tensor(0.)
for _ in range(n_iter):
# Store the previous iterate
tprev = rebuilt
# Invert with our current estimate of the phases
inverse = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length).float()
# Rebuild the spectrogram
rebuilt = torch.view_as_real(
torch.stft(
input=inverse,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=True,
pad_mode='reflect',
normalized=False,
onesided=True,
return_complex=True,
)
)
# Update our phase estimates
angles = rebuilt
if momentum:
angles = angles - tprev.mul_(momentum / (1 + momentum))
angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles))
# Return the final phase estimates
waveform = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length)
# unpack batch
waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])
return waveform
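# Illustrative usage (editor's sketch): recover a waveform from a magnitude
# spectrogram; `magnitudes` below is a random placeholder.
#   n_fft = 400
#   window = torch.hann_window(n_fft)
#   magnitudes = torch.rand(1, n_fft // 2 + 1, 81)
#   recovered = griffinlim(magnitudes, window=window, n_fft=n_fft,
#                          hop_length=n_fft // 2, win_length=n_fft, power=1.0,
#                          normalized=False, n_iter=32, momentum=0.99,
#                          length=None, rand_init=True)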
def amplitude_to_DB(
x: Tensor,
multiplier: float,
amin: float,
db_multiplier: float,
top_db: Optional[float] = None
) -> Tensor:
r"""Turn a spectrogram from the power/amplitude scale to the decibel scale.
The output of each tensor in a batch depends on the maximum value of that tensor,
and so may return different values for an audio clip split into snippets vs. a full clip.
Args:
x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take
the form `(..., freq, time)`. Batched inputs should include a channel dimension and
have the form `(batch, channel, freq, time)`.
multiplier (float): Use 10. for power and 20. for amplitude
amin (float): Number to clamp ``x``
db_multiplier (float): Log10(max(reference value and amin))
top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number
is 80. (Default: ``None``)
Returns:
Tensor: Output tensor in decibel scale
"""
x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
x_db -= multiplier * db_multiplier
if top_db is not None:
# Expand batch
shape = x_db.size()
packed_channels = shape[-3] if x_db.dim() > 2 else 1
x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1])
x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1))
# Repack batch
x_db = x_db.reshape(shape)
return x_db
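# Illustrative usage (editor's sketch): convert a power spectrogram to decibels,
# using a reference of 1.0 so that db_multiplier = log10(max(amin, 1.0)) = 0.0:
#   power_spec = torch.rand(1, 201, 81)
#   spec_db = amplitude_to_DB(power_spec, multiplier=10.0, amin=1e-10,
#                             db_multiplier=0.0, top_db=80.0)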
def DB_to_amplitude(
x: Tensor,
ref: float,
power: float
) -> Tensor:
r"""Turn a tensor from the decibel scale to the power/amplitude scale.
Args:
x (Tensor): Input tensor before being converted to power/amplitude scale.
ref (float): Reference which the output will be scaled by.
power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude.
Returns:
Tensor: Output tensor in power/amplitude scale.
"""
return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * create_fb_matrix(A.size(-1), ...)``.
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
# Equivalent filterbank construction by Librosa
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
# hertz to mel(f) is 2595. * math.log10(1. + (f / 700.))
m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0))
m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0))
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
# mel to hertz(mel) is 700. * (10**(mel / 2595.) - 1.)
f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0)
# calculate the difference between each mel point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values. "
f"The value for `n_mels` ({n_mels}) may be set too high. "
f"Or, the value for `n_freqs` ({n_freqs}) may be set too low."
)
return fb
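# Illustrative usage (editor's sketch): build a mel filterbank and apply it to a
# linear-frequency spectrogram of shape (channel, freq, time):
#   spec = torch.rand(1, 201, 81)
#   fb = create_fb_matrix(n_freqs=201, f_min=0.0, f_max=8000.0,
#                         n_mels=40, sample_rate=16000, norm="slaney")
#   mel_spec = torch.matmul(spec.transpose(-1, -2), fb).transpose(-1, -2)
#   mel_spec.shape   # -> (1, 40, 81): (channel, n_mels, time)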
def create_dct(
n_mfcc: int,
n_mels: int,
norm: Optional[str]
) -> Tensor:
r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),
normalized depending on norm.
Args:
n_mfcc (int): Number of mfc coefficients to retain
n_mels (int): Number of mel filterbanks
norm (str or None): Norm to use (either 'ortho' or None)
Returns:
Tensor: The transformation matrix, to be right-multiplied to
row-wise data of size (``n_mels``, ``n_mfcc``).
"""
# http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
n = torch.arange(float(n_mels))
k = torch.arange(float(n_mfcc)).unsqueeze(1)
dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels)
if norm is None:
dct *= 2.0
else:
assert norm == "ortho"
dct[0] *= 1.0 / math.sqrt(2.0)
dct *= math.sqrt(2.0 / float(n_mels))
return dct.t()
def mu_law_encoding(
x: Tensor,
quantization_channels: int
) -> Tensor:
r"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1.
Args:
x (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law encoding
"""
mu = quantization_channels - 1.0
if not x.is_floating_point():
x = x.to(torch.float)
mu = torch.tensor(mu, dtype=x.dtype)
x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
return x_mu
def mu_law_decoding(
x_mu: Tensor,
quantization_channels: int
) -> Tensor:
r"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
x_mu (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law decoding
"""
mu = quantization_channels - 1.0
if not x_mu.is_floating_point():
x_mu = x_mu.to(torch.float)
mu = torch.tensor(mu, dtype=x_mu.dtype)
x = ((x_mu) / mu) * 2 - 1.0
x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
return x
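# Illustrative round trip (editor's sketch): encoding then decoding recovers the
# signal up to the companding quantization error.
#   x = torch.linspace(-1.0, 1.0, steps=16)
#   x_mu = mu_law_encoding(x, quantization_channels=256)      # int64 in [0, 255]
#   x_hat = mu_law_decoding(x_mu, quantization_channels=256)  # approximately x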
def complex_norm(
complex_tensor: Tensor,
power: float = 1.0
) -> Tensor:
r"""Compute the norm of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`).
Returns:
Tensor: Power of the normed input tensor. Shape of `(..., )`
"""
# Replace by torch.norm once issue is fixed
# https://github.com/pytorch/pytorch/issues/34279
return complex_tensor.pow(2.).sum(-1).pow(0.5 * power)
def angle(
complex_tensor: Tensor
) -> Tensor:
r"""Compute the angle of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
Return:
Tensor: Angle of a complex tensor. Shape of `(..., )`
"""
return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0])
def magphase(
complex_tensor: Tensor,
power: float = 1.0
) -> Tuple[Tensor, Tensor]:
r"""Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`)
Returns:
(Tensor, Tensor): The magnitude and phase of the complex tensor
"""
mag = complex_norm(complex_tensor, power)
phase = angle(complex_tensor)
return mag, phase
def phase_vocoder(
complex_specgrams: Tensor,
rate: float,
phase_advance: Tensor
) -> Tensor:
r"""Given a STFT tensor, speed up in time without modifying pitch by a
factor of ``rate``.
Args:
complex_specgrams (Tensor): Dimension of `(..., freq, time, complex=2)`
rate (float): Speed-up factor
phase_advance (Tensor): Expected phase advance in each bin. Dimension of (freq, 1)
Returns:
Tensor: Complex Specgrams Stretch with dimension of `(..., freq, ceil(time/rate), complex=2)`
Example
>>> freq, hop_length = 1025, 512
>>> # (channel, freq, time, complex=2)
>>> complex_specgrams = torch.randn(2, freq, 300, 2)
>>> rate = 1.3 # Speed up by 30%
>>> phase_advance = torch.linspace(
>>> 0, math.pi * hop_length, freq)[..., None]
>>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
>>> x.shape # with 231 == ceil(300 / 1.3)
torch.Size([2, 1025, 231, 2])
"""
# pack batch
shape = complex_specgrams.size()
complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:]))
time_steps = torch.arange(0,
complex_specgrams.size(-2),
rate,
device=complex_specgrams.device,
dtype=complex_specgrams.dtype)
alphas = time_steps % 1.0
phase_0 = angle(complex_specgrams[..., :1, :])
# Time Padding
complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2])
# (new_bins, freq, 2)
complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long())
complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long())
angle_0 = angle(complex_specgrams_0)
angle_1 = angle(complex_specgrams_1)
norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1)
norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1)
phase = angle_1 - angle_0 - phase_advance
phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))
# Compute Phase Accum
phase = phase + phase_advance
phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)
phase_acc = torch.cumsum(phase, -1)
mag = alphas * norm_1 + (1 - alphas) * norm_0
real_stretch = mag * torch.cos(phase_acc)
imag_stretch = mag * torch.sin(phase_acc)
complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1)
# unpack batch
complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:])
return complex_specgrams_stretch
def mask_along_axis_iid(
specgrams: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
Args:
specgrams (Tensor): Real spectrograms (batch, channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)
Returns:
Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)
"""
if axis != 2 and axis != 3:
raise ValueError('Only Frequency and Time masking are supported')
device = specgrams.device
dtype = specgrams.dtype
value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param
min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value)
# Create broadcastable mask
mask_start = min_value[..., None, None]
mask_end = (min_value + value)[..., None, None]
mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)
# Per batch example masking
specgrams = specgrams.transpose(axis, -1)
specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)
specgrams = specgrams.transpose(axis, -1)
return specgrams
def mask_along_axis(
specgram: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
All examples will have the same mask interval.
Args:
specgram (Tensor): Real spectrogram (channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (1 -> frequency, 2 -> time)
Returns:
Tensor: Masked spectrogram of dimensions (channel, freq, time)
"""
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
value = torch.rand(1) * mask_param
min_value = torch.rand(1) * (specgram.size(axis) - value)
mask_start = (min_value.long()).squeeze()
mask_end = (min_value.long() + value.long()).squeeze()
assert mask_end - mask_start < mask_param
if axis == 1:
specgram[:, mask_start:mask_end] = mask_value
elif axis == 2:
specgram[:, :, mask_start:mask_end] = mask_value
else:
raise ValueError('Only Frequency and Time masking are supported')
# unpack batch
specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:])
return specgram
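# Illustrative SpecAugment-style usage (editor's sketch): apply one frequency
# mask and one time mask to a (channel, freq, time) spectrogram.
#   spec = torch.rand(1, 201, 81)
#   spec = mask_along_axis(spec, mask_param=27, mask_value=0.0, axis=1)  # frequency
#   spec = mask_along_axis(spec, mask_param=40, mask_value=0.0, axis=2)  # time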
def compute_deltas(
specgram: Tensor,
win_length: int = 5,
mode: str = "replicate"
) -> Tensor:
r"""Compute delta coefficients of a tensor, usually a spectrogram:
.. math::
d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2}
where :math:`d_t` is the deltas at time :math:`t`,
:math:`c_t` is the spectrogram coeffcients at time :math:`t`,
:math:`N` is ``(win_length-1)//2``.
Args:
specgram (Tensor): Tensor of audio of dimension (..., freq, time)
win_length (int, optional): The window length used for computing delta (Default: ``5``)
mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``)
Returns:
Tensor: Tensor of deltas of dimension (..., freq, time)
Example
>>> specgram = torch.randn(1, 40, 1000)
>>> delta = compute_deltas(specgram)
>>> delta2 = compute_deltas(delta)
"""
device = specgram.device
dtype = specgram.dtype
# pack batch
shape = specgram.size()
specgram = specgram.reshape(1, -1, shape[-1])
assert win_length >= 3
n = (win_length - 1) // 2
# twice sum of integer squared
denom = n * (n + 1) * (2 * n + 1) / 3
specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)
kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1)
output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom
# unpack batch
output = output.reshape(shape)
return output
def _compute_nccf(
waveform: Tensor,
sample_rate: int,
frame_time: float,
freq_low: int
) -> Tensor:
r"""
Compute Normalized Cross-Correlation Function (NCCF).
.. math::
\phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}},
where
:math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,
:math:`w` is the waveform,
:math:`N` is the length of a frame,
:math:`b_i` is the beginning of frame :math:`i`,
:math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`.
"""
EPSILON = 10 ** (-9)
# Number of lags to check
lags = int(math.ceil(sample_rate / freq_low))
frame_size = int(math.ceil(sample_rate * frame_time))
waveform_length = waveform.size()[-1]
num_of_frames = int(math.ceil(waveform_length / frame_size))
p = lags + num_of_frames * frame_size - waveform_length
waveform = torch.nn.functional.pad(waveform, (0, p))
# Compute lags
output_lag = []
for lag in range(1, lags + 1):
s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
output_frames = (
(s1 * s2).sum(-1)
/ (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2)
/ (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2)
)
output_lag.append(output_frames.unsqueeze(-1))
nccf = torch.cat(output_lag, -1)
return nccf
def _combine_max(
a: Tuple[Tensor, Tensor],
b: Tuple[Tensor, Tensor],
thresh: float = 0.99
) -> Tuple[Tensor, Tensor]:
"""
Take value from first if bigger than a multiplicative factor of the second, elementwise.
"""
mask = (a[0] > thresh * b[0])
values = mask * a[0] + ~mask * b[0]
indices = mask * a[1] + ~mask * b[1]
return values, indices
def _find_max_per_frame(
nccf: Tensor,
sample_rate: int,
freq_high: int
) -> Tensor:
r"""
For each frame, take the highest value of NCCF,
apply centered median smoothing, and convert to frequency.
Note: if the maximum over the first half of the lags is close enough to
the overall maximum, the smaller lag is preferred.
"""
lag_min = int(math.ceil(sample_rate / freq_high))
# Among lags whose NCCF is close enough to the maximum, prefer the smallest one
best = torch.max(nccf[..., lag_min:], -1)
half_size = nccf.shape[-1] // 2
half = torch.max(nccf[..., lag_min:half_size], -1)
best = _combine_max(half, best)
indices = best[1]
# Add back minimal lag
indices += lag_min
# Add 1 empirical calibration offset
indices += 1
return indices
def _median_smoothing(
indices: Tensor,
win_length: int
) -> Tensor:
r"""
Apply median smoothing to the 1D tensor over the given window.
"""
# Centered windowed
pad_length = (win_length - 1) // 2
# "replicate" padding in any dimension
indices = torch.nn.functional.pad(
indices, (pad_length, 0), mode="constant", value=0.
)
indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)
roll = indices.unfold(-1, win_length, 1)
values, _ = torch.median(roll, -1)
return values
def detect_pitch_frequency(
waveform: Tensor,
sample_rate: int,
frame_time: float = 10 ** (-2),
win_length: int = 30,
freq_low: int = 85,
freq_high: int = 3400,
) -> Tensor:
r"""Detect pitch frequency.
It is implemented using normalized cross-correlation function and median smoothing.
Args:
waveform (Tensor): Tensor of audio of dimension (..., freq, time)
sample_rate (int): The sample rate of the waveform (Hz)
frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``).
win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``).
freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``).
freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``).
Returns:
Tensor: Tensor of freq of dimension (..., frame)
"""
# pack batch
shape = list(waveform.size())
waveform = waveform.reshape([-1] + shape[-1:])
nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)
indices = _find_max_per_frame(nccf, sample_rate, freq_high)
indices = _median_smoothing(indices, win_length)
# Convert indices to frequency
EPSILON = 10 ** (-9)
freq = sample_rate / (EPSILON + indices.to(torch.float))
# unpack batch
freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))
return freq
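# Illustrative usage (editor's sketch): estimate the per-frame pitch of a pure
# 440 Hz tone; the returned values should lie close to 440 Hz.
#   sample_rate = 16000
#   t = torch.arange(0, 1, 1 / sample_rate)
#   waveform = torch.sin(2 * math.pi * 440 * t).unsqueeze(0)
#   freq = detect_pitch_frequency(waveform, sample_rate)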
def sliding_window_cmn(
waveform: Tensor,
cmn_window: int = 600,
min_cmn_window: int = 100,
center: bool = False,
norm_vars: bool = False,
) -> Tensor:
r"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
waveform (Tensor): Tensor of audio of dimension (..., freq, time)
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center==true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
Returns:
Tensor: Tensor of freq of dimension (..., frame)
"""
input_shape = waveform.shape
num_frames, num_feats = input_shape[-2:]
waveform = waveform.view(-1, num_frames, num_feats)
num_channels = waveform.shape[0]
dtype = waveform.dtype
device = waveform.device
last_window_start = last_window_end = -1
cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cmn_waveform = torch.zeros(
num_channels, num_frames, num_feats, dtype=dtype, device=device)
for t in range(num_frames):
window_start = 0
window_end = 0
if center:
window_start = t - cmn_window // 2
window_end = window_start + cmn_window
else:
window_start = t - cmn_window
window_end = t + 1
if window_start < 0:
window_end -= window_start
window_start = 0
if not center:
if window_end > t:
window_end = max(t + 1, min_cmn_window)
if window_end > num_frames:
window_start -= (window_end - num_frames)
window_end = num_frames
if window_start < 0:
window_start = 0
if last_window_start == -1:
input_part = waveform[:, window_start: window_end - window_start, :]
cur_sum += torch.sum(input_part, 1)
if norm_vars:
cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :]
else:
if window_start > last_window_start:
frame_to_remove = waveform[:, last_window_start, :]
cur_sum -= frame_to_remove
if norm_vars:
cur_sumsq -= (frame_to_remove ** 2)
if window_end > last_window_end:
frame_to_add = waveform[:, last_window_end, :]
cur_sum += frame_to_add
if norm_vars:
cur_sumsq += (frame_to_add ** 2)
window_frames = window_end - window_start
last_window_start = window_start
last_window_end = window_end
cmn_waveform[:, t, :] = waveform[:, t, :] - cur_sum / window_frames
if norm_vars:
if window_frames == 1:
cmn_waveform[:, t, :] = torch.zeros(
num_channels, num_feats, dtype=dtype, device=device)
else:
variance = cur_sumsq
variance = variance / window_frames
variance -= ((cur_sum ** 2) / (window_frames ** 2))
variance = torch.pow(variance, -0.5)
cmn_waveform[:, t, :] *= variance
cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats))
if len(input_shape) == 2:
cmn_waveform = cmn_waveform.squeeze(0)
return cmn_waveform
def spectral_centroid(
waveform: Tensor,
sample_rate: int,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
) -> Tensor:
r"""
Compute the spectral centroid for each channel along the time axis.
The spectral centroid is defined as the weighted average of the
frequency values, weighted by their magnitude.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
sample_rate (int): Sample rate of the audio waveform
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
Returns:
Tensor: Dimension (..., time)
"""
specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, power=1., normalized=False)
freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2,
device=specgram.device).reshape((-1, 1))
freq_dim = -2
return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)
def compute_kaldi_pitch(
waveform: torch.Tensor,
sample_rate: float,
frame_length: float = 25.0,
frame_shift: float = 10.0,
min_f0: float = 50,
max_f0: float = 400,
soft_min_f0: float = 10.0,
penalty_factor: float = 0.1,
lowpass_cutoff: float = 1000,
resample_frequency: float = 4000,
delta_pitch: float = 0.005,
nccf_ballast: float = 7000,
lowpass_filter_width: int = 1,
upsample_filter_width: int = 5,
max_frames_latency: int = 0,
frames_per_chunk: int = 0,
simulate_first_pass_online: bool = False,
recompute_frame: int = 500,
snip_edges: bool = True,
) -> torch.Tensor:
"""Extract pitch based on method described in [1].
This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi.
Args:
waveform (Tensor):
The input waveform of shape `(..., time)`.
sample_rate (float):
Sample rate of `waveform`.
frame_length (float, optional):
Frame length in milliseconds. (default: 25.0)
frame_shift (float, optional):
Frame shift in milliseconds. (default: 10.0)
min_f0 (float, optional):
Minimum F0 to search for (Hz) (default: 50.0)
max_f0 (float, optional):
Maximum F0 to search for (Hz) (default: 400.0)
soft_min_f0 (float, optional):
Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0)
penalty_factor (float, optional):
Cost factor for FO change. (default: 0.1)
lowpass_cutoff (float, optional):
Cutoff frequency for LowPass filter (Hz) (default: 1000)
resample_frequency (float, optional):
Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff.
(default: 4000)
delta_pitch( float, optional):
Smallest relative change in pitch that our algorithm measures. (default: 0.005)
nccf_ballast (float, optional):
Increasing this factor reduces NCCF for quiet frames (default: 7000)
lowpass_filter_width (int, optional):
Integer that determines filter width of lowpass filter, more gives sharper filter.
(default: 1)
upsample_filter_width (int, optional):
Integer that determines filter width when upsampling NCCF. (default: 5)
max_frames_latency (int, optional):
Maximum number of frames of latency that we allow pitch tracking to introduce into
the feature processing (affects output only if ``frames_per_chunk > 0`` and
``simulate_first_pass_online=True``) (default: 0)
frames_per_chunk (int, optional):
The number of frames used for energy normalization. (default: 0)
simulate_first_pass_online (bool, optional):
If true, the function will output features that correspond to what an online decoder
would see in the first pass of decoding -- not the final version of the features,
which is the default. (default: False)
Relevant if ``frames_per_chunk > 0``.
recompute_frame (int, optional):
Only relevant for compatibility with online pitch extraction.
A non-critical parameter; the frame at which we recompute some of the forward pointers,
after revising our estimate of the signal energy.
Relevant if ``frames_per_chunk > 0``. (default: 500)
snip_edges (bool, optional):
If this is set to false, the incomplete frames near the ending edge won't be snipped,
so that the number of frames is the file size divided by the frame-shift.
This makes different types of features give the same number of frames. (default: True)
Returns:
Tensor: Pitch feature. Shape: ``(batch, frames, 2)`` where the last dimension
corresponds to pitch and NCCF.
Reference:
- A pitch extraction algorithm tuned for automatic speech recognition
P. Ghahremani, B. BabaAli, D. Povey, K. Riedhammer, J. Trmal and S. Khudanpur
2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),
Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049.
"""
shape = waveform.shape
waveform = waveform.reshape(-1, shape[-1])
result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch(
waveform, sample_rate, frame_length, frame_shift,
min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff,
resample_frequency, delta_pitch, nccf_ballast,
lowpass_filter_width, upsample_filter_width, max_frames_latency,
frames_per_chunk, simulate_first_pass_online, recompute_frame,
snip_edges,
)
result = result.reshape(shape[:-1] + result.shape[-2:])
return result
| [
"torch.round",
"torch.cat",
"torch.view_as_real",
"torch.stack",
"torch.istft",
"torch.nn.functional.pad",
"torch.stft",
"torch.sum",
"torch.log1p",
"torch.norm",
"torch.abs",
"torch.tensor",
"torch.zeros",
"torch.cos",
"torch.min",
"torch.max",
"torch.linspace",
"torch.clamp",
"torch.nn.functional.conv1d",
"torch.cumsum",
"torch.pow",
"torch.rand",
"torch.sin",
"torch.arange",
"torch.ops.torchaudio.kaldi_ComputeKaldiPitch",
"torch.sign",
"torch.atan2",
"torch.median"
] | 1.4.0 | worc3131/audio | 05bff83fdec3e8f70f80bf7a1b89951bf7050114 |
1.0 | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
"""
import collections
import inspect
import math
import os
import random
import re
import shutil
import sys
import time
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data import DataLoader, Dataset, IterableDataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from . import __version__
from .configuration_utils import PretrainedConfig
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .debug_utils import DebugOption, DebugUnderflowOverflow
from .deepspeed import deepspeed_init, is_deepspeed_zero3_enabled
from .dependency_versions_check import dep_version_check
from .file_utils import (
CONFIG_NAME,
WEIGHTS_NAME,
PushToHubMixin,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_tpu_available,
)
from .modelcard import TrainingSummary
from .modeling_utils import PreTrainedModel, unwrap_model
from .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
number_of_arguments,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
_is_torch_generator_available = False
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_torch_generator_available = True
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
dep_version_check("fairscale")
import fairscale
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.nn.wrap import auto_wrap
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.Dataset` or :obj:`torch.utils.data.IterableDataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
Note that if it's a :obj:`torch.utils.data.IterableDataset` with some randomization and you are training in
a distributed fashion, your iterable dataset should either use an internal attribute :obj:`generator` that
is a :obj:`torch.Generator` for the randomization that must be identical on all processes (and the Trainer
will manually set the seed of this :obj:`generator` at each epoch) or have a :obj:`set_epoch()` method that
internally sets the seed of the RNGs used.
eval_dataset (:obj:`torch.utils.data.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
maximum length when batching inputs, and it will be saved along with the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyperparameters (such as layer count, sizes of inner
layers, dropout probabilities, etc.).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping metric names (strings) to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False`.
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
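Example (a minimal usage sketch; the checkpoint name, ``my_train_dataset``, ``my_eval_dataset`` and the accuracy
metric below are placeholders chosen for illustration, not objects provided by this class)::

    import numpy as np
    from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments

    model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

    def compute_metrics(eval_pred):
        # eval_pred is an EvalPrediction with `.predictions` and `.label_ids`
        predictions = np.argmax(eval_pred.predictions, axis=-1)
        return {"accuracy": float((predictions == eval_pred.label_ids).mean())}

    trainer = Trainer(
        model=model,
        args=TrainingArguments(output_dir="tmp_trainer", num_train_epochs=1),
        train_dataset=my_train_dataset,  # any tokenized torch.utils.data.Dataset
        eval_dataset=my_eval_dataset,
        tokenizer=tokenizer,
        compute_metrics=compute_metrics,
    )
    trainer.train()
    metrics = trainer.evaluate()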
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# set the correct log level depending on the node
log_level = args.get_process_log_level()
logging.set_verbosity(log_level)
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a model that is much bigger than a single GPU
# 2. fp16-enabled DeepSpeed loads the model in half precision and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
# 3. full fp16 eval - since the model needs to be cast to half precision first
# 4. Sharded DDP - same as MP
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or args.deepspeed
or (args.fp16_full_eval and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
if self.place_model_on_device:
self._move_model_to_device(model, args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create a clone of the remote repo and the output directory if needed
if self.args.push_to_hub:
self.init_git_repo()
if self.args.should_save:
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
self._signature_columns = None
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
if is_sagemaker_mp_enabled():
self.scaler = smp.amp.GradScaler()
elif self.sharded_ddp is not None:
self.scaler = ShardedGradScaler()
else:
self.scaler = torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
if is_sagemaker_mp_enabled() and self.use_amp and args.max_grad_norm is not None and args.max_grad_norm > 0:
raise ValueError(
"SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
"along 'max_grad_norm': 0 in your hyperparameters."
)
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
# reset to 0 every time the flos need to be logged
self.current_flos = 0
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# very last
self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformers.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
In the first case, will instantiate a member of that class.
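Example (a sketch; ``MyCallback`` is a hypothetical subclass and ``trainer`` an existing instance)::

    from transformers import TrainerCallback

    class MyCallback(TrainerCallback):
        def on_epoch_end(self, args, state, control, **kwargs):
            print(f"finished epoch {state.epoch}")

    trainer.add_callback(MyCallback)    # pass the class: an instance is created for you
    # or, equivalently, pass an already instantiated callback:
    # trainer.add_callback(MyCallback())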
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformers.TrainerCallback` and return it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformers.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformers.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
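Example (a sketch removing the default progress callback from an existing ``trainer``)::

    from transformers import ProgressCallback

    trainer.remove_callback(ProgressCallback)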
"""
self.callback_handler.remove_callback(callback)
def _move_model_to_device(self, model, device):
model = model.to(device)
# Moving a model to an XLA device disconnects the tied weights, so we have to retie them.
if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"):
model.tie_weights()
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return dataset
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
if version.parse(datasets.__version__) < version.parse("1.4.0"):
dataset.set_format(
type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
)
return dataset
else:
return dataset.remove_columns(ignored_columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
if not isinstance(self.train_dataset, collections.abc.Sized):
return None
generator = None
if self.args.world_size <= 1 and _is_torch_generator_available:
generator = torch.Generator()
generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
# Build the sampler.
if self.args.group_by_length:
if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
lengths = (
self.train_dataset[self.args.length_column_name]
if self.args.length_column_name in self.train_dataset.column_names
else None
)
else:
lengths = None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
lengths=lengths,
model_input_name=model_input_name,
generator=generator,
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=lengths,
model_input_name=model_input_name,
seed=self.args.seed,
)
else:
if self.args.world_size <= 1:
if _is_torch_generator_available:
return RandomSampler(self.train_dataset, generator=generator)
return RandomSampler(self.train_dataset)
elif (
self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
and not self.args.dataloader_drop_last
):
# Use a loop for TPUs when drop_last is False so that all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=self.args.seed,
)
else:
return DistributedSampler(
self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=self.args.seed,
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
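Example (a sketch of one possible override, assuming a sized ``train_dataset``)::

    from torch.utils.data import DataLoader, SequentialSampler

    class MyTrainer(Trainer):
        def get_train_dataloader(self) -> DataLoader:
            # iterate over the training set in order, bypassing the default sampler logic
            return DataLoader(
                self.train_dataset,
                batch_size=self.args.train_batch_size,
                sampler=SequentialSampler(self.train_dataset),
                collate_fn=self.data_collator,
                drop_last=self.args.dataloader_drop_last,
            )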
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_dataset = self.train_dataset
if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
train_dataset = self._remove_unused_columns(train_dataset, description="training")
if isinstance(train_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
train_dataset = IterableDatasetShard(
train_dataset,
batch_size=self.args.train_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
train_dataset,
batch_size=self.args.train_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
train_sampler = self._get_train_sampler()
return DataLoader(
train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]:
# Deprecated code
if self.args.use_legacy_prediction_loop:
if is_torch_tpu_available():
return SequentialDistributedSampler(
eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif is_sagemaker_mp_enabled():
return SequentialDistributedSampler(
eval_dataset,
num_replicas=smp.dp_size(),
rank=smp.dp_rank(),
batch_size=self.args.per_device_eval_batch_size,
)
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
if self.args.world_size <= 1:
return SequentialSampler(eval_dataset)
else:
return ShardSampler(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")
if isinstance(eval_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
eval_dataset = IterableDatasetShard(
eval_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
eval_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.Dataset`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
test_dataset = self._remove_unused_columns(test_dataset, description="test")
if isinstance(test_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
test_dataset = IterableDatasetShard(
test_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
test_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method (or :obj:`create_optimizer`
and/or :obj:`create_scheduler`).
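Example (a sketch of the ``optimizers`` alternative; assumes ``model``, ``args`` and ``train_dataset`` are
already defined, and the hyperparameter values are illustrative)::

    from torch.optim import SGD
    from transformers import get_scheduler

    optimizer = SGD(model.parameters(), lr=1e-3, momentum=0.9)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=0, num_training_steps=1000)
    trainer = Trainer(
        model=model, args=args, train_dataset=train_dataset, optimizers=(optimizer, scheduler)
    )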
"""
self.create_optimizer()
self.create_scheduler(num_training_steps)
def create_optimizer(self):
"""
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if self.optimizer is None:
decay_parameters = get_parameter_names(self.model, [nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if is_sagemaker_mp_enabled():
self.optimizer = smp.DistributedOptimizer(self.optimizer)
def create_scheduler(self, num_training_steps: int):
"""
Setup the scheduler. The optimizer of the trainer must have been set up before this method is called.
Args:
num_training_steps (int): The number of training steps to do.
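Example (a sketch of overriding this method in a subclass; the cosine schedule and warmup value are
illustrative choices)::

    from transformers import get_cosine_schedule_with_warmup

    class MyTrainer(Trainer):
        def create_scheduler(self, num_training_steps: int):
            if self.lr_scheduler is None:
                self.lr_scheduler = get_cosine_schedule_with_warmup(
                    self.optimizer, num_warmup_steps=100, num_training_steps=num_training_steps
                )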
"""
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement the :obj:`__len__` method.
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
"""HP search setup code"""
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
if self.hp_search_backend == HPSearchBackend.OPTUNA:
params = self.hp_space(trial)
elif self.hp_search_backend == HPSearchBackend.RAY:
params = trial
params.pop("wandb", None)
for key, value in params.items():
if not hasattr(self.args, key):
logger.warn(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
continue
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
if self.args.deepspeed:
# Rebuild the deepspeed config to reflect the updated training parameters
from transformers.deepspeed import HfDeepSpeedConfig
self.args.hf_deepspeed_config = HfDeepSpeedConfig(self.args)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.args.should_save:
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = number_of_arguments(self.model_init)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def _wrap_model(self, model, training=True):
if is_sagemaker_mp_enabled():
# Wrapping the base model twice in a DistributedModel will raise an error.
if isinstance(self.model_wrapped, smp.model.DistributedModel):
return self.model_wrapped
return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
# DeepSpeed has already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# train/eval could be run multiple times - if already wrapped, don't wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
elif is_sagemaker_dp_enabled():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
ignore_keys_for_eval: Optional[List[str]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
ignore_keys_for_eval (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions for evaluation during the training.
kwargs:
Additional keyword arguments used to hide deprecated arguments
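Example (a sketch; the checkpoint path is illustrative)::

    trainer.train()                                                 # fresh training run
    trainer.train(resume_from_checkpoint=True)                      # resume from the last checkpoint in args.output_dir
    trainer.train(resume_from_checkpoint="output/checkpoint-500")   # resume from a specific checkpoint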
"""
resume_from_checkpoint = None if not resume_from_checkpoint else resume_from_checkpoint
# memory metrics - must set up as early as possible
self._memory_tracker.start()
args = self.args
self.is_in_train = True
# do_train is not a reliable argument, as it might not be set and .train() still called, so
# the following is a workaround:
if args.fp16_full_eval and not args.do_train:
self._move_model_to_device(self.model, args.device)
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
if resume_from_checkpoint is not None:
if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
logger.info(f"Loading model from {resume_from_checkpoint}).")
if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
checkpoint_version = config.transformers_version
if checkpoint_version is not None and checkpoint_version != __version__:
logger.warn(
f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
f"Transformers but your current version is {__version__}. This is not recommended and could "
"yield to errors or unwanted behaviors."
)
if args.deepspeed:
# will be resumed in deepspeed_init
pass
else:
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
# If the model is on the GPU, it still works!
self._load_state_dict_in_model(state_dict)
# release memory
del state_dict
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self._move_model_to_device(self.model, args.device)
self.model_wrapped = self.model
# Keep track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if args.max_steps > 0:
max_steps = args.max_steps
num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
args.max_steps % num_update_steps_per_epoch > 0
)
# May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
# the best we can do.
num_train_samples = args.max_steps * total_train_batch_size
else:
max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(args.num_train_epochs)
num_train_samples = len(self.train_dataset) * args.num_train_epochs
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = args.max_steps
# Setting a very large number of epochs so we go as many times as necessary over the iterator.
num_train_epochs = sys.maxsize
num_update_steps_per_epoch = max_steps
num_train_samples = args.max_steps * total_train_batch_size
if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
if self.args.n_gpu > 1:
# nn.DataParallel(model) replicates the model, creating new variables, and the module
# references registered here no longer work on the other gpus, breaking the module
raise ValueError(
"Currently --debug underflow_overflow is not supported under DP. Please use DDP (torch.distributed.launch)."
)
else:
debug_overflow = DebugUnderflowOverflow(self.model) # noqa
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if args.deepspeed:
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
num_examples = (
self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
steps_trained_progress_bar = None
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` "
"flag to your launch command, but you will resume the training on data already seen by your model."
)
if self.is_local_process_zero() and not args.disable_tqdm:
steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
steps_trained_progress_bar.set_description("Skipping the first batches")
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(args.device)
# _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
model.zero_grad()
self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
elif isinstance(train_dataloader.dataset, IterableDatasetShard):
train_dataloader.dataset.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
if steps_trained_progress_bar is not None:
steps_trained_progress_bar.update(1)
if steps_trained_in_current_epoch == 0:
self._load_rng_state(resume_from_checkpoint)
continue
elif steps_trained_progress_bar is not None:
steps_trained_progress_bar.close()
steps_trained_progress_bar = None
if step % args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
if (
((step + 1) % args.gradient_accumulation_steps != 0)
and args.local_rank != -1
and args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self.current_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
args.max_grad_norm,
)
# Optimizer step
optimizer_was_run = True
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
scale_before = self.scaler.get_scale()
self.scaler.step(self.optimizer)
self.scaler.update()
scale_after = self.scaler.get_scale()
optimizer_was_run = scale_before <= scale_after
else:
self.optimizer.step()
if optimizer_was_run and not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
else:
self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
# Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif args.local_rank != -1:
dist.barrier()
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
if os.path.exists(best_model_path):
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(best_model_path, map_location="cpu")
# If the model is on the GPU, it still works!
self._load_state_dict_in_model(state_dict)
else:
logger.warn(
f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
"on multiple nodes, you should activate `--save_on_each_node`."
)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
train_loss = self._total_loss_scalar / self.state.global_step
metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
self.store_flos()
metrics["total_flos"] = self.state.total_flos
metrics["train_loss"] = train_loss
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
self.log(metrics)
self.control = self.callback_handler.on_train_end(args, self.state, self.control)
return TrainOutput(self.state.global_step, train_loss, metrics)
def _load_state_dict_in_model(self, state_dict):
load_result = self.model.load_state_dict(state_dict, strict=False)
if len(load_result.missing_keys) != 0:
if set(load_result.missing_keys) == set(self.model._keys_to_ignore_on_save):
self.model.tie_weights()
else:
logger.warn(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warn(f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.")
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.store_flos()
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _load_rng_state(self, checkpoint):
# Load RNG states from `checkpoint`
if checkpoint is None:
return
local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
if local_rank != -1:
rng_file = os.path.join(checkpoint, f"rng_state_{local_rank}.pth")
if not os.path.isfile(rng_file):
logger.info(
f"Didn't find an RNG file for process {local_rank}, if you are resuming a training that "
"wasn't launched in a distributed fashion, reproducibility is not guaranteed."
)
return
else:
rng_file = os.path.join(checkpoint, "rng_state.pth")
if not os.path.isfile(rng_file):
logger.info(
"Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
"fashion, reproducibility is not guaranteed."
)
return
checkpoint_rng_state = torch.load(rng_file)
random.setstate(checkpoint_rng_state["python"])
np.random.set_state(checkpoint_rng_state["numpy"])
torch.random.set_rng_state(checkpoint_rng_state["cpu"])
if torch.cuda.is_available():
if self.args.local_rank != -1:
torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
else:
torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
if is_torch_tpu_available():
xm.set_rng_state(checkpoint_rng_state["xla"])
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save, except when using FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
self.store_flos()
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir)
if self.deepspeed:
# under zero3, the model file itself doesn't get saved since it's bogus, unless the deepspeed
# config `stage3_gather_fp16_weights_on_model_save` is True
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif is_sagemaker_mp_enabled():
if smp.dp_rank() == 0:
# Consolidate the state dict on all processes of dp_rank 0
opt_state_dict = self.optimizer.state_dict()
# Save it and the scheduler on the main process
if self.args.should_save:
torch.save(opt_state_dict, os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
if self.use_amp:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))
elif self.args.should_save and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
if self.use_amp:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.args.should_save:
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Save the RNG states
rng_states = {
"python": random.getstate(),
"numpy": np.random.get_state(),
"cpu": torch.random.get_rng_state(),
}
if torch.cuda.is_available():
if self.args.local_rank == -1:
# In non-distributed training, we save the global CUDA RNG state (will take care of DataParallel)
rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
else:
rng_states["cuda"] = torch.cuda.random.get_rng_state()
if is_torch_tpu_available():
rng_states["xla"] = xm.get_rng_state()
# A process can arrive here before process 0 has a chance to save the model, in which case output_dir may
# not yet exist.
os.makedirs(output_dir, exist_ok=True)
local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
if local_rank == -1:
torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
else:
torch.save(rng_states, os.path.join(output_dir, f"rng_state_{local_rank}.pth"))
# Maybe delete some older checkpoints.
if self.args.should_save:
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if self.deepspeed:
# deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=map_location)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.use_amp and os.path.isfile(os.path.join(checkpoint, "scaler.pt")):
self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, "scaler.pt")))
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
Whether to optimize greater or lower objects. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
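# A minimal usage sketch for the method above, assuming optuna is installed and a `trainer`
# that was built with `model_init=...`; `my_hp_space` is an illustrative name, not from this file.
#
#     def my_hp_space(trial):
#         return {"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True)}
#
#     best_run = trainer.hyperparameter_search(
#         hp_space=my_hp_space, n_trials=20, direction="minimize"
#     )
#     print(best_run.run_id, best_run.objective, best_run.hyperparameters)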
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
kwargs = dict(device=self.args.device)
if self.deepspeed and inputs[k].dtype != torch.int64:
# NLP model inputs are int64 and get adjusted to the right dtype of the
# embedding. Other models, such as wav2vec2, have inputs that are already float and
# thus may need special handling to match the dtype of the model.
kwargs.update(dict(dtype=self.args.hf_deepspeed_config.dtype()))
inputs[k] = v.to(**kwargs)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
scaler = self.scaler if self.use_amp else None
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps, scaler=scaler)
return loss_mb.reduce_mean().detach().to(self.args.device)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
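# A minimal sketch of the "subclass and override" pattern described above, assuming a plain
# cross-entropy objective on a `labels` key; `MyTrainer` and the loss choice are illustrative.
#
#     import torch.nn.functional as F
#     from transformers import Trainer
#
#     class MyTrainer(Trainer):
#         def compute_loss(self, model, inputs, return_outputs=False):
#             labels = inputs.pop("labels")
#             outputs = model(**inputs)
#             logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0]
#             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))
#             return (loss, outputs) if return_outputs else loss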
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local main process (e.g., the main process on its machine when training
in a distributed fashion on several machines).
"""
return self.args.local_process_index == 0
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
# Special case for SageMaker ModelParallel: there, process_index is dp_process_index, not the global
# process index.
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
"""
if output_dir is None:
output_dir = self.args.output_dir
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif is_sagemaker_mp_enabled():
# Calling the state_dict needs to be done on the wrapped model and on all processes.
state_dict = self.model_wrapped.state_dict()
if self.args.should_save:
self._save(output_dir, state_dict=state_dict)
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
):
state_dict = self.model.state_dict()
if self.args.should_save:
self._save(output_dir, state_dict=state_dict)
elif self.deepspeed:
# this takes care of everything as long as we aren't under zero3
if self.args.should_save:
self._save(output_dir)
if is_deepspeed_zero3_enabled():
# It's too complicated to try to override different places where the weights dump gets
# saved, so since under zero3 the file is bogus, simply delete it. The user should
# either use the deepspeed checkpoint to resume, or recover the full weights with
# zero_to_fp32.py stored in the checkpoint.
if self.args.should_save:
file = os.path.join(output_dir, WEIGHTS_NAME)
if os.path.isfile(file):
# logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
os.remove(file)
# now save the real model if stage3_gather_fp16_weights_on_model_save=True
# if false it will not be saved.
# This must be called on all ranks
self.deepspeed.save_fp16_model(output_dir, WEIGHTS_NAME)
elif self.args.should_save:
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info(f"Saving model checkpoint to {output_dir}")
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
save_config=self.args.should_save,
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, save_config=self.args.should_save, save_function=xm.save)
if self.tokenizer is not None and self.args.should_save:
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self.args.local_rank != -1:
self.state.total_flos += distributed_broadcast_scalars([self.current_flos]).sum().item()
self.current_flos = 0
else:
self.state.total_flos += self.current_flos
self.current_flos = 0
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match is not None and regex_match.groups() is not None:
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
for i in range(best_model_index, len(checkpoints_sorted) - 2):
checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
# If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
# we don't do to allow resuming.
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is a :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
"eval_bleu" if the prefix is "eval" (the default).
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self.log(output.metrics)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
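# A minimal sketch of calling the method above, assuming `trainer` and `eval_ds` already exist
# (neither is defined in this file):
#
#     metrics = trainer.evaluate(eval_dataset=eval_ds, metric_key_prefix="eval")
#     print(metrics["eval_loss"])   # every key is prefixed with "eval_", as described above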
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"test"`):
An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
"test_bleu" if the prefix is "test" (the default).
.. note::
If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
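# A minimal sketch of calling the method above, assuming `trainer` and `test_ds` already exist
# (neither is defined in this file):
#
#     out = trainer.predict(test_ds, metric_key_prefix="test")
#     print(out.predictions.shape, out.metrics)   # label_ids is None if the dataset has no labels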
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
# if eval is called w/o train init deepspeed here
if self.args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
# XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
# for example the Z3-optimizer is a must for zero3 to work even for inference - what we
# don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
deepspeed_engine.optimizer.optimizer = None
deepspeed_engine.lr_scheduler = None
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, cast the model to fp16 first and then put it on the device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
logger.info(f"***** Running {description} *****")
if isinstance(dataloader.dataset, collections.abc.Sized):
logger.info(f" Num examples = {self.num_examples(dataloader)}")
else:
logger.info(" Num examples: Unknown")
logger.info(f" Batch size = {batch_size}")
model.eval()
self.callback_handler.eval_dataloader = dataloader
# Do this before wrapping.
eval_dataset = dataloader.dataset
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
# Initialize containers
# losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
losses_host = None
preds_host = None
labels_host = None
# losses/preds/labels on CPU (final containers)
all_losses = None
all_preds = None
all_labels = None
# Will be useful when we have an iterable dataset so don't know its length.
observed_num_examples = 0
# Main evaluation loop
for step, inputs in enumerate(dataloader):
# Update the observed num examples
observed_batch_size = find_batch_size(inputs)
if observed_batch_size is not None:
observed_num_examples += observed_batch_size
# Prediction step
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
# Update containers on host
if loss is not None:
losses = self._nested_gather(loss.repeat(batch_size))
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
logits = self._pad_across_processes(logits)
logits = self._nested_gather(logits)
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels = self._pad_across_processes(labels)
labels = self._nested_gather(labels)
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = (
labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
)
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
# Number of samples
if not isinstance(eval_dataset, IterableDataset):
num_samples = len(eval_dataset)
# The instance check is weird and does not actually check for the type, but whether the dataset has the right
# methods. Therefore we need to make sure it also has the attribute.
elif isinstance(eval_dataset, IterableDatasetShard) and hasattr(eval_dataset, "num_examples"):
num_samples = eval_dataset.num_examples
else:
num_samples = observed_num_examples
# The number of losses has been rounded to a multiple of batch_size, and in distributed training the number
# of samples has been rounded to a multiple of batch_size, so we truncate.
if all_losses is not None:
all_losses = all_losses[:num_samples]
if all_preds is not None:
all_preds = nested_truncate(all_preds, num_samples)
if all_labels is not None:
all_labels = nested_truncate(all_labels, num_samples)
# Metrics!
if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if all_losses is not None:
metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
def _nested_gather(self, tensors, name=None):
"""
Gather the value of `tensors` (a tensor or a list/tuple of nested tensors) from all processes so they can be
concatenated.
"""
if tensors is None:
return
if is_torch_tpu_available():
if name is None:
name = "nested_gather"
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate.
def _pad_across_processes(self, tensor, pad_index=-100):
"""
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
they can safely be gathered.
"""
if isinstance(tensor, (list, tuple)):
return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
elif not isinstance(tensor, torch.Tensor):
raise TypeError(
f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
)
if len(tensor.shape) < 2:
return tensor
# Gather all sizes
size = torch.tensor(tensor.shape, device=tensor.device)[None]
sizes = self._nested_gather(size).cpu()
max_size = max(s[1] for s in sizes)
if tensor.shape[1] == max_size:
return tensor
# Then pad to the maximum size
old_size = tensor.shape
new_size = list(old_size)
new_size[1] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
new_tensor[:, : old_size[1]] = tensor
return new_tensor
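# A standalone sketch of the dim-1 padding rule used above (shapes only, no distributed gather);
# the sizes chosen here are illustrative.
#
#     t = torch.ones(2, 5)
#     max_size = 7                                  # as if another process reported dim 1 == 7
#     padded = t.new_zeros(2, max_size) + (-100)
#     padded[:, : t.shape[1]] = t
#     assert padded.shape == (2, 7)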
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using :obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
logits and labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if is_sagemaker_mp_enabled():
raw_outputs = smp_forward_only(model, inputs)
if has_labels:
if isinstance(raw_outputs, dict):
loss_mb = raw_outputs["loss"]
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
else:
loss_mb = raw_outputs[0]
logits_mb = raw_outputs[1:]
loss = loss_mb.reduce_mean().detach().cpu()
logits = smp_nested_concat(logits_mb)
else:
loss = None
if isinstance(raw_outputs, dict):
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
else:
logits_mb = raw_outputs
logits = smp_nested_concat(logits_mb)
else:
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
def init_git_repo(self):
"""
Initializes a git repo in :obj:`self.args.push_to_hub_model_id`.
"""
if not self.args.should_save:
return
use_auth_token = True if self.args.push_to_hub_token is None else self.args.push_to_hub_token
repo_url = PushToHubMixin._get_repo_url_from_name(
self.args.push_to_hub_model_id,
organization=self.args.push_to_hub_organization,
use_auth_token=use_auth_token,
)
self.repo = PushToHubMixin._create_or_get_repo(
self.args.output_dir, repo_url=repo_url, use_auth_token=use_auth_token
)
# By default, ignore the checkpoint folders
if not os.path.exists(os.path.join(self.args.output_dir, ".gitignore")):
with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer:
writer.writelines(["checkpoint-*/"])
def create_model_card(
self,
language: Optional[str] = None,
license: Optional[str] = None,
tags: Optional[str] = None,
model_name: Optional[str] = None,
finetuned_from: Optional[str] = None,
tasks: Optional[str] = None,
dataset_tags: Optional[Union[str, List[str]]] = None,
dataset: Optional[Union[str, List[str]]] = None,
dataset_args: Optional[Union[str, List[str]]] = None,
):
training_summary = TrainingSummary.from_trainer(
self,
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
tasks=tasks,
dataset_tags=dataset_tags,
dataset=dataset,
dataset_args=dataset_args,
)
model_card = training_summary.to_model_card()
with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
f.write(model_card)
def push_to_hub(self, commit_message: Optional[str] = "add model", **kwargs) -> str:
"""
Upload `self.model` and `self.tokenizer` to the 🤗 model hub on the repo `self.args.push_to_hub_model_id`.
Parameters:
commit_message (:obj:`str`, `optional`, defaults to :obj:`"add model"`):
Message to commit while pushing.
kwargs:
Additional keyword arguments passed along to :meth:`~transformers.Trainer.create_model_card`.
Returns:
The url of the commit of your model in the given repository.
"""
if self.args.should_save:
self.create_model_card(model_name=self.args.push_to_hub_model_id, **kwargs)
# Needs to be executed on all processes for TPU training, but will only save on the process determined by
# self.args.should_save.
self.save_model()
# Only push from one node.
if not self.is_world_process_zero():
return
return self.repo.push_to_hub(commit_message=commit_message)
#
# Deprecated code
#
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
# if eval is called w/o train init deepspeed here
if self.args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
# XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
# for example the Z3-optimizer is a must for zero3 to work even for inference - what we
# don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
deepspeed_engine.optimizer.optimizer = None
deepspeed_engine.lr_scheduler = None
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, cast the model to fp16 first and then put it on the device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info(f"***** Running {description} *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Batch size = {batch_size}")
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, self.args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
# The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
# a batch size to the sampler)
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
concatenating them to `gathered`
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
| [
"torch.cat",
"torch.utils.data.RandomSampler",
"torch.cuda.amp.autocast",
"torch.Generator",
"torch.cuda.random.set_rng_state",
"torch.random.get_rng_state",
"torch.cuda.random.set_rng_state_all",
"torch.cuda.is_available",
"torch.load",
"torch.cuda.random.get_rng_state_all",
"torch.nn.DataParallel",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.empty",
"torch.random.set_rng_state",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.amp.GradScaler",
"torch.distributed.barrier",
"torch.cuda.random.get_rng_state",
"torch.distributed.get_local_rank",
"torch.no_grad",
"torch.utils.data.distributed.DistributedSampler"
] | 1.0 | mweiss17/transformers | ae88d5adc89a2020c21d62481e98f058f91784aa |
1.1 | import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
class ChannelPoolAdaptiveAvg1d(torch.nn.AdaptiveAvgPool1d):
def forward(self, input):
n, c = input.size()
input = input.view(n,c,1).permute(0,2,1)
pooled = torch.nn.functional.adaptive_avg_pool1d(input, self.output_size)
_, _, c = pooled.size()
pooled = pooled.permute(0,2,1)
return pooled.view(n,c)
class Img2Vec():
def __init__(self, cuda=False, model='resnet-18', layer='default', layer_output_size=512):
""" Img2Vec
:param cuda: If set to True, will run forward pass on GPU
:param model: String name of requested model
:param layer: String or Int depending on model. See more docs: https://github.com/christiansafka/img2vec.git
:param layer_output_size: Int depicting the output size of the requested layer
"""
self.device = torch.device("cuda" if cuda else "cpu")
self.layer_output_size = layer_output_size
self.model_name = model
self.model, self.extraction_layer = self._get_model_and_layer(model, layer)
self.model = self.model.to(self.device)
self.model.eval()
self.scaler = transforms.Scale((224, 224))
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
self.to_tensor = transforms.ToTensor()
def get_vec(self, img, tensor=False):
""" Get vector embedding from PIL image
:param img: PIL Image
:param tensor: If True, get_vec will return a FloatTensor instead of Numpy array
:returns: Numpy ndarray
"""
image = self.normalize(self.to_tensor(self.scaler(img))).unsqueeze(0).to(self.device)
if self.model_name in ('alexnet', 'mobilenet', 'resnext-50-small'):
my_embedding = torch.zeros(1, self.layer_output_size)
elif self.model_name in ('resnet-18', 'resnext-50'):
# the hooked avgpool output is 4-d (N, C, 1, 1), so the buffer must match it for copy_
# and for the [0, :, 0, 0] indexing below
my_embedding = torch.zeros(1, self.layer_output_size, 1, 1)
def copy_data(m, i, o):
my_embedding.copy_(o.data)
h = self.extraction_layer.register_forward_hook(copy_data)
h_x = self.model(image)
h.remove()
if tensor:
return my_embedding
else:
if self.model_name in ('alexnet', 'mobilenet', 'resnext-50-small'):
return my_embedding.numpy()[0, :]
elif self.model_name in ('resnet-18', 'resnext-50'):
return my_embedding.numpy()[0, :, 0, 0]
def _get_model_and_layer(self, model_name, layer):
""" Internal method for getting layer from model
:param model_name: model name such as 'resnet-18'
:param layer: layer as a string for resnet-18 or int for alexnet
:returns: pytorch model, selected layer
"""
if model_name == 'resnext-50-small':
model = models.resnext50_32x4d(pretrained=True)
if layer == 'default':
#b = torch.nn.AvgPool2d(kernel_size=(8,8),stride=(4,4))
#a = torch.nn.AvgPool2d(kernel_size=(2,2),stride=2)
#model.avgpool = b
#model.fc = nn.Identity()
#layer = model.avgpool
model.fc = ChannelPoolAdaptiveAvg1d(output_size=512)
layer = model.fc
self.layer_output_size = 512
else:
layer = model._modules.get(layer)
return model, layer
if model_name == 'resnext-50':
model = models.resnext50_32x4d(pretrained=True)
if layer == 'default':
layer = model._modules.get('avgpool')
self.layer_output_size = 2048
else:
layer = model._modules.get(layer)
return model, layer
if model_name == 'resnet-18':
model = models.resnet18(pretrained=True)
if layer == 'default':
layer = model._modules.get('avgpool')
self.layer_output_size = 512
else:
layer = model._modules.get(layer)
return model, layer
# @TODO: Fix or remove, this is both slow and inaccurate, not sure where we'd use it
if model_name == 'alexnet':
model = models.alexnet(pretrained=True)
if layer == 'default':
layer = model.classifier[-2]
self.layer_output_size = 4096
else:
layer = model.classifier[-layer]
return model, layer
# @TODO: Fix or remove. This is slow and not quite as accurate as resnet-18; it was a failed experiment that
# ends the encoder with the output of an FC layer rather than the pooling layer. If it is still unchanged a
# month from now, just remove it.
if model_name == 'mobilenet':
model = models.mobilenet_v2(pretrained=True)
if layer == 'default':
layer = model._modules.get('classifier')
self.layer_output_size = 1000
else:
layer = model._modules.get(layer)
return model, layer
else:
raise KeyError('Model %s was not found' % model_name)
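# A minimal usage sketch, assuming torchvision weights can be downloaded and that
# "cat.jpg" (a placeholder path) is an RGB image on disk.
if __name__ == "__main__":
    from PIL import Image

    img2vec = Img2Vec(cuda=False, model='resnet-18')
    vec = img2vec.get_vec(Image.open('cat.jpg').convert('RGB'))
    print(vec.shape)  # (512,) numpy vector taken from the resnet-18 avgpool layer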
| [
"torch.zeros",
"torch.device",
"torch.nn.functional.adaptive_avg_pool1d"
] | 1.1.0 | abitrolly/lightwood | ee0c095f594c5d491196401b59344702f346bc9c |
1.3 | import torch
import torch.nn.functional as F
import configparser
import matplotlib.pyplot as plt
import os
import numpy as np
plt.figure(figsize=(20,5))
import ast
import sys
#to import parent-level modules
os.chdir('Temporal-Based Approach')
sys.path.append('..')
from model.StackedLSTM import StackedLSTM
from utils.normalize_testdata import normalize_testdata
from influence import prediction_with_influence
from postprocessing.postprocessing import PostProcessing
#read the configuration file
config = configparser.ConfigParser()
config.read('config.ini')
batch_size = int(config['Test']['test_batch_size'])
input_size = int(config['Common']['input_size'])
hidden_size = int(config['Common']['hidden_size'])
num_layer = int(config['Common']['num_layer'])
sequence_length = int(config['Common']['sequence_length'])
output_size = int(config['Common']['output_size'])
test_dir = config['Test']['test_dir']
weights_loc = config['Common']['weights_loc']
initial_seq = torch.Tensor(ast.literal_eval(config['Save']['initial_seq']))
# max_note = int(config['Save']['max_note'])
# min_note = int(config['Save']['min_note'])
#check if CUDA is available
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
initial_seq = initial_seq.to(device)
#load the weights of the LSTM model
model = StackedLSTM(input_size,hidden_size,num_layer,output_size, batch_size)
model.load_state_dict(torch.load('{}'.format(weights_loc)))
#set the model in evaluation mode
model.eval()
model.to(device)
test_list = os.listdir(test_dir)
for each_test_file in test_list:
test_file_path = os.path.join(test_dir,each_test_file).replace('\\','/')
testing_data = np.array(normalize_testdata(test_file_path, 50, 89))
predicted_notes_list = prediction_with_influence(model, testing_data, initial_seq)
print(predicted_notes_list)
#convert tensor to list
for i in range(len(predicted_notes_list)):
predicted_notes_list[i]=predicted_notes_list[i].detach().cpu().numpy().tolist()
postprocessing = PostProcessing()
postprocessing.stich_notes(predicted_notes_list)
postprocessing.music_generation(testing_data*89, each_test_file)
| [
"torch.cuda.is_available"
] | 1.3.0 | uvashisth/Sonification-using-Deep-Learning | a0917e785c35aa5fadcbb258e938c58071b4e482 |
0.4 | from torch import nn
class GPT2LMHead(nn.Module):
""" Language Model Head for the transformer """
def __init__(self, model_embeddings_weights, config):
super(GPT2LMHead, self).__init__()
self.n_embd = config.n_embd
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
# Truncated Language modeling logits (we remove the last token)
# h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
lm_logits = self.decoder(hidden_state)
return lm_logits
class GPT2MultipleChoiceHead(nn.Module):
""" Classifier Head for the transformer """
def __init__(self, config):
super(GPT2MultipleChoiceHead, self).__init__()
self.n_embd = config.n_embd
self.linear = nn.Linear(config.n_embd, 1)
nn.init.normal_(self.linear.weight, std=0.02)
nn.init.normal_(self.linear.bias, 0)
def forward(self, hidden_states, mc_token_ids):
# Classification logits
# hidden_state (bsz, num_choices, seq_length, hidden_size)
# mc_token_ids (bsz, num_choices)
mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
# (bsz, num_choices, 1, hidden_size)
multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
# (bsz, num_choices, hidden_size)
multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
# (bsz, num_choices)
return multiple_choice_logits
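# A minimal shape-check sketch for the two heads above; the tiny config object and the
# random tensors are illustrative stand-ins for a real GPT-2 config and hidden states.
if __name__ == "__main__":
    import torch

    class _TinyConfig:
        n_embd = 8

    mc_head = GPT2MultipleChoiceHead(_TinyConfig())
    hidden = torch.randn(2, 3, 5, 8)                        # (bsz, num_choices, seq_len, n_embd)
    mc_token_ids = torch.full((2, 3), 4, dtype=torch.long)  # classify on the last position
    print(mc_head(hidden, mc_token_ids).shape)              # torch.Size([2, 3])

    lm_head = GPT2LMHead(torch.nn.Embedding(10, 8).weight, _TinyConfig())
    print(lm_head(torch.randn(2, 5, 8)).shape)              # torch.Size([2, 5, 10])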
| [
"torch.nn.Linear",
"torch.nn.init.normal_"
] | 0.4.1 | kniazevgeny/GPT2sQA-1 | b319016f51847a25ea3e9cd9c8450d58831cc42a |
1.7 | import torch
from torch import nn
import torch.nn.functional as F
class ConvNormLReLU(nn.Sequential):
def __init__(self, in_ch, out_ch, kernel_size=3, stride=1, padding=1, pad_mode="reflect", groups=1, bias=False):
pad_layer = {
"zero": nn.ZeroPad2d,
"same": nn.ReplicationPad2d,
"reflect": nn.ReflectionPad2d,
}
if pad_mode not in pad_layer:
raise NotImplementedError
super(ConvNormLReLU, self).__init__(
pad_layer[pad_mode](padding),
nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, stride=stride, padding=0, groups=groups, bias=bias),
#nn.GroupNorm(num_groups=1, num_channels=out_ch, affine=True),
nn.InstanceNorm2d(num_features=out_ch, affine=True),
nn.LeakyReLU(0.2, inplace=True)
)
class InvertedResBlock(nn.Module):
def __init__(self, in_ch, out_ch, expansion_ratio=2):
super(InvertedResBlock, self).__init__()
self.use_res_connect = in_ch == out_ch
bottleneck = int(round(in_ch*expansion_ratio))
layers = []
if expansion_ratio != 1:
layers.append(ConvNormLReLU(in_ch, bottleneck, kernel_size=1, padding=0))
# dw
layers.append(ConvNormLReLU(bottleneck, bottleneck, groups=bottleneck, bias=True))
# pw
layers.append(nn.Conv2d(bottleneck, out_ch, kernel_size=1, padding=0, bias=False))
# layers.append(nn.GroupNorm(num_groups=1, num_channels=out_ch, affine=True))
layers.append(nn.InstanceNorm2d(num_features=out_ch, affine=True))
self.layers = nn.Sequential(*layers)
def forward(self, input):
out = self.layers(input)
if self.use_res_connect:
out = input + out
return out
class Generator(nn.Module):
def __init__(self, ):
super().__init__()
self.block_a = nn.Sequential(
ConvNormLReLU(3, 32, kernel_size=7, padding=3),
ConvNormLReLU(32, 64, stride=2, padding=(0,1,0,1)),
ConvNormLReLU(64, 64)
)
self.block_b = nn.Sequential(
ConvNormLReLU(64, 128, stride=2, padding=(0,1,0,1)),
ConvNormLReLU(128, 128)
)
self.block_c = nn.Sequential(
ConvNormLReLU(128, 128),
InvertedResBlock(128, 256, 2),
InvertedResBlock(256, 256, 2),
InvertedResBlock(256, 256, 2),
InvertedResBlock(256, 256, 2),
ConvNormLReLU(256, 128),
)
self.block_d = nn.Sequential(
ConvNormLReLU(128, 128),
ConvNormLReLU(128, 128)
)
self.block_e = nn.Sequential(
ConvNormLReLU(128, 64),
ConvNormLReLU(64, 64),
ConvNormLReLU(64, 32, kernel_size=7, padding=3)
)
self.out_layer = nn.Sequential(
nn.Conv2d(32, 3, kernel_size=1, stride=1, padding=0, bias=False),
nn.Tanh()
)
def forward(self, input, align_corners=False):
out = self.block_a(input)
half_size = out.size()[-2:]
out = self.block_b(out)
out = self.block_c(out)
if align_corners:
out = F.interpolate(out, half_size, mode="bilinear", align_corners=True)
else:
out = F.interpolate(out, scale_factor=2, mode='nearest')
out = self.block_d(out)
if align_corners:
out = F.interpolate(out, input.size()[-2:], mode="bilinear", align_corners=True)
else:
out = F.interpolate(out, scale_factor=2, mode='nearest')
out = self.block_e(out)
out = self.out_layer(out)
return out
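# A minimal shape-check sketch for the generator above; no pretrained weights are loaded,
# and the 256x256 input size is an illustrative choice (inputs are expected in [-1, 1]).
if __name__ == "__main__":
    net = Generator()
    x = torch.rand(1, 3, 256, 256) * 2 - 1
    with torch.no_grad():
        y = net(x, align_corners=True)
    print(y.shape)  # torch.Size([1, 3, 256, 256]), values in (-1, 1) from the Tanh output layer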
| [
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.functional.interpolate",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d"
] | 1.7.1 | viztopia/animegan2-pytorch | cef7f42fa429cfc902c3b419d343389e690943f5 |
0.4 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, PretrainedConfig, PreTrainedModel,
prune_linear_layer, add_start_docstrings)
logger = logging.getLogger(__name__)
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
# which are not required when using a pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
@torch.jit.script
def f_gelu(x):
"""Fused gelu func"""
return x * 0.5 * (1.0 + torch.erf(x / 1.41421))
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return f_gelu(x)
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(PretrainedConfig):
r"""
:class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a
`BertModel`.
Arguments:
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size_or_config_json_file=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
**kwargs):
super(BertConfig, self).__init__(**kwargs)
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
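# Note (hedged): this pure-PyTorch fallback normalizes over the last dimension with a biased
# variance estimate, so it should match torch.nn.LayerNorm(hidden_size, eps=...) numerically, e.g.:
# >>> ln, ref = BertLayerNorm(8, eps=1e-12), nn.LayerNorm(8, eps=1e-12)
# >>> x = torch.randn(2, 3, 8)
# >>> torch.allclose(ln(x), ref(x), atol=1e-5)
# True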
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
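# Shape walk-through for BertSelfAttention.forward (illustrative; B = batch size, S = sequence length):
#   hidden_states:                       (B, S, hidden_size)
#   query/key/value after transpose:     (B, num_attention_heads, S, attention_head_size)
#   attention_scores / attention_probs:  (B, num_attention_heads, S, S)
#   context_layer (first output):        (B, S, all_head_size), i.e. (B, S, hidden_size)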
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
for head in heads:
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
def forward(self, input_tensor, attention_mask, head_mask=None):
self_outputs = self.self(input_tensor, attention_mask, head_mask)
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
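# Hypothetical head-pruning sketch (not part of the original module; numbers depend on the config):
# >>> attention = BertAttention(config)
# >>> attention.prune_heads([0, 2])          # remove heads 0 and 2 in this layer
# >>> attention.self.num_attention_heads     # e.g. 12 -> 10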
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, head_mask=None):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # outputs, (hidden states), (attentions)
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size,
config.vocab_size,
bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = BertConfig
pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
def __init__(self, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
BERT_START_DOCSTRING = r""" The BERT model was proposed in
`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
pre-trained using a combination of masked language modeling objective and next sentence prediction
on a large corpus comprising the Toronto Book Corpus and Wikipedia.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matters related to general usage and behavior.
.. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
https://arxiv.org/abs/1810.04805
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model.
"""
BERT_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertModel(BertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input; you're often better off averaging or pooling
            the sequence of hidden-states over the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertModel(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_weights)
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output,
extended_attention_mask,
head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
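# Illustrative sketch of the additive attention mask built above (assumed toy values):
# >>> attention_mask = torch.tensor([[1., 1., 0.]])        # 1 = attend, 0 = padding
# >>> extended = attention_mask[:, None, None, :]          # shape (batch, 1, 1, seq_len)
# >>> (1.0 - extended) * -10000.0                          # ~0 where we attend, -10000 on padding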
@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training:
a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForPreTraining(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
            Indices should be in ``[-1, 0, ..., config.vocab_size - 1]`` (see ``input_ids`` docstring).
            Tokens with indices set to ``-1`` are ignored (masked); the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size - 1]``.
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForPreTraining(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores, seq_relationship_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None, position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
            Indices should be in ``[-1, 0, ..., config.vocab_size - 1]`` (see ``input_ids`` docstring).
            Tokens with indices set to ``-1`` are ignored (masked); the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size - 1]``.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForMaskedLM(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, masked_lm_labels=input_ids)
>>> loss, prediction_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForNextSentencePrediction(BertPreTrainedModel):
r"""
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Next sequence prediction (classification) loss.
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForNextSentencePrediction(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> seq_relationship_scores = outputs[0]
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
outputs = (next_sentence_loss,) + outputs
return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForSequenceClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
            If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss);
            if ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForSequenceClassification(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, logits = outputs[:2]
"""
def __init__(self, config):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
BERT_START_DOCSTRING)
class BertForMultipleChoice(BertPreTrainedModel):
r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.Tensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Mask to avoid performing attention on padding token indices.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForMultipleChoice(config)
>>> choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
>>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
>>> labels = torch.tensor(1).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, classification_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForMultipleChoice, self).__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
num_choices = input_ids.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForTokenClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForTokenClassification(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, scores = outputs[:2]
"""
def __init__(self, config):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForQuestionAnswering(BertPreTrainedModel):
r"""
**start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
**end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
**start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
**end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForQuestionAnswering(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        >>> loss, start_scores, end_scores = outputs[:3]
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
end_positions=None, position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the split adds an extra dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs; we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
| [
"torch.nn.Linear",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.sigmoid",
"torch.sqrt",
"torch.nn.Softmax",
"torch.zeros_like",
"torch.zeros",
"torch.nn.Tanh",
"torch.matmul",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.erf",
"torch.from_numpy",
"torch.ones_like",
"torch.nn.Embedding"
] | 0.4.1 | jiahuanglin/pytorch-transformers | a7a93143df4e60e31e062d7f2a4eb0d6283473a4 |
1.0 | # TvNorm.py
import torch
import torch.nn as nn
class TvNorm(nn.Module):
"""
    Normalization using the total variation: each pixel is normalized by the length of its feature
    (channel) vector, i.e., in MATLAB notation:
        z = diag( 1 / sqrt( sum(x.^2, 3) + eps ) ) * x
Attributes:
eps: small float so no division by 0
weight: scaling weight for the affine transformation
bias: bias for the affine transformation
"""
def __init__(self, nChan, eps=1e-4):
"""
:param nChan: number of channels for the data you expect to normalize
:param eps: small float so no division by 0
"""
super().__init__()
self.eps = eps
# Tv Norm has no tuning of the scaling weights
# self.weight = nn.Parameter(torch.ones(nChan))
self.register_buffer('weight', torch.ones(nChan))
self.bias = nn.Parameter(torch.zeros(nChan))
def forward(self,x):
"""
:param x: inputs tensor, second dim is channels
example dims: (num images in the batch , num channels, height , width)
:return: normalized version with same dimensions as x
"""
z = torch.pow(x, 2)
z = torch.div(x, torch.sqrt(torch.sum(z, dim=1, keepdim=True) + self.eps))
        # assumes the tensor is formatted as (batch, num. of channels, height, width)
if self.weight is not None:
w = self.weight.unsqueeze(0) # add first dimension
w = w.unsqueeze(-1) # add last dimension
w = w.unsqueeze(-1) # add last dimension
z = z * w
if self.bias is not None:
b = self.bias.unsqueeze(0) # add first dimension
b = b.unsqueeze(-1) # add last dimension
b = b.unsqueeze(-1) # add last dimension
z = z + b
return z
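# Minimal usage sketch (hypothetical, not part of the original file):
# >>> norm = TvNorm(nChan=3)
# >>> x = torch.randn(4, 3, 8, 8)       # (batch, channels, height, width)
# >>> norm(x).shape
# torch.Size([4, 3, 8, 8])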
| [
"torch.ones",
"torch.zeros",
"torch.sum",
"torch.pow"
] | 1.0.1 | EmoryMLIP/DynamicBlocks | 52acc9fbc1a2640c6ac8922fa18105279ccaea97 |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer to automate the training."""
import logging
import warnings
from itertools import count
from pathlib import Path
from traceback import print_exc
from typing import Any, Dict, Iterable, List, Optional, Union
import torch
from torch.utils.data import DataLoader
from pytorch_lightning.accelerators import Accelerator
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.plugins import Plugin
from pytorch_lightning.profiler import BaseProfiler
from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin
from pytorch_lightning.trainer.configuration_validator import ConfigValidator
from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector
from pytorch_lightning.trainer.connectors.callback_connector import CallbackConnector
from pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector
from pytorch_lightning.trainer.connectors.data_connector import DataConnector
from pytorch_lightning.trainer.connectors.debugging_connector import DebuggingConnector
from pytorch_lightning.trainer.connectors.env_vars_connector import _defaults_from_env_vars
from pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector
from pytorch_lightning.trainer.connectors.model_connector import ModelConnector
from pytorch_lightning.trainer.connectors.optimizer_connector import OptimizerConnector
from pytorch_lightning.trainer.connectors.profiler_connector import ProfilerConnector
from pytorch_lightning.trainer.connectors.slurm_connector import SLURMConnector
from pytorch_lightning.trainer.connectors.training_trick_connector import TrainingTricksConnector
from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
from pytorch_lightning.trainer.deprecated_api import DeprecatedDistDeviceAttributes, DeprecatedTrainerAttributes
from pytorch_lightning.trainer.evaluation_loop import EvaluationLoop
from pytorch_lightning.trainer.logging import TrainerLoggingMixin
from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin
from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin
from pytorch_lightning.trainer.predict_loop import PredictLoop
from pytorch_lightning.trainer.properties import TrainerProperties
from pytorch_lightning.trainer.states import TrainerState
from pytorch_lightning.trainer.training_loop import TrainLoop
from pytorch_lightning.trainer.training_tricks import TrainerTrainingTricksMixin
from pytorch_lightning.tuner.tuning import Tuner
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.debugging import InternalDebugger
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.model_helpers import is_overridden
log = logging.getLogger(__name__)
# warnings to ignore in trainer
warnings.filterwarnings(
'ignore', message='torch.distributed.reduce_op is deprecated, '
'please use torch.distributed.ReduceOp instead'
)
class Trainer(
TrainerProperties,
TrainerCallbackHookMixin,
TrainerModelHooksMixin,
TrainerOptimizersMixin,
TrainerLoggingMixin,
TrainerTrainingTricksMixin,
TrainerDataLoadingMixin,
DeprecatedDistDeviceAttributes,
DeprecatedTrainerAttributes,
):
@_defaults_from_env_vars
def __init__(
self,
logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,
checkpoint_callback: bool = True,
callbacks: Optional[Union[List[Callback], Callback]] = None,
default_root_dir: Optional[str] = None,
gradient_clip_val: float = 0,
process_position: int = 0,
num_nodes: int = 1,
num_processes: int = 1,
gpus: Optional[Union[List[int], str, int]] = None,
auto_select_gpus: bool = False,
tpu_cores: Optional[Union[List[int], str, int]] = None,
log_gpu_memory: Optional[str] = None,
progress_bar_refresh_rate: Optional[int] = None,
overfit_batches: Union[int, float] = 0.0,
track_grad_norm: Union[int, float, str] = -1,
check_val_every_n_epoch: int = 1,
fast_dev_run: Union[int, bool] = False,
accumulate_grad_batches: Union[int, Dict[int, int], List[list]] = 1,
max_epochs: Optional[int] = None,
min_epochs: Optional[int] = None,
max_steps: Optional[int] = None,
min_steps: Optional[int] = None,
limit_train_batches: Union[int, float] = 1.0,
limit_val_batches: Union[int, float] = 1.0,
limit_test_batches: Union[int, float] = 1.0,
limit_predict_batches: Union[int, float] = 1.0,
val_check_interval: Union[int, float] = 1.0,
flush_logs_every_n_steps: int = 100,
log_every_n_steps: int = 50,
accelerator: Optional[Union[str, Accelerator]] = None,
sync_batchnorm: bool = False,
precision: int = 32,
weights_summary: Optional[str] = 'top',
weights_save_path: Optional[str] = None,
num_sanity_val_steps: int = 2,
truncated_bptt_steps: Optional[int] = None,
resume_from_checkpoint: Optional[Union[Path, str]] = None,
profiler: Optional[Union[BaseProfiler, str]] = None,
benchmark: bool = False,
deterministic: bool = False,
reload_dataloaders_every_epoch: bool = False,
auto_lr_find: Union[bool, str] = False,
replace_sampler_ddp: bool = True,
terminate_on_nan: bool = False,
auto_scale_batch_size: Union[str, bool] = False,
prepare_data_per_node: bool = True,
plugins: Optional[Union[Plugin, str, list]] = None,
amp_backend: str = 'native',
amp_level: str = 'O2',
distributed_backend: Optional[str] = None,
move_metrics_to_cpu: bool = False,
multiple_trainloader_mode: str = 'max_size_cycle',
stochastic_weight_avg: bool = False
):
r"""
Customize every aspect of training via flags
Args:
accelerator: Previously known as distributed_backend (dp, ddp, ddp2, etc...).
Can also take in an accelerator object for custom hardware.
accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.
amp_backend: The mixed precision backend to use ("native" or "apex")
amp_level: The optimization level to use (O1, O2, etc...).
auto_lr_find: If set to True, will make trainer.tune() run a learning rate finder,
                trying to optimize the initial learning rate for faster convergence. The trainer.tune() method will
                set the suggested learning rate in self.lr or self.learning_rate in the LightningModule.
                To use a different attribute name, set a string with that name instead of True.
auto_scale_batch_size: If set to True, will `initially` run a batch size
finder trying to find the largest batch size that fits into memory.
The result will be stored in self.batch_size in the LightningModule.
Additionally, can be set to either `power` that estimates the batch size through
a power search or `binsearch` that estimates the batch size through a binary search.
auto_select_gpus: If enabled and `gpus` is an integer, pick available
gpus automatically. This is especially useful when
GPUs are configured to be in "exclusive mode", such
that only one process at a time can access them.
benchmark: If true enables cudnn.benchmark.
callbacks: Add a callback or list of callbacks.
checkpoint_callback: If ``True``, enable checkpointing.
It will configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint in
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.callbacks`.
check_val_every_n_epoch: Check val every n train epochs.
default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed.
Default: ``os.getcwd()``.
Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'
deterministic: If true enables cudnn.deterministic.
distributed_backend: deprecated. Please use 'accelerator'
            fast_dev_run: Runs ``n`` batch(es) of train, val and test if set to an int ``n``, or 1 batch of each
                if set to ``True``, to catch any bugs (i.e. a sort of unit test).
flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
gpus: number of gpus to train on (int) or which GPUs to train on (list or str) applied per node
gradient_clip_val: 0 means don't clip.
limit_train_batches: How much of training dataset to check (float = fraction, int = num_batches)
limit_val_batches: How much of validation dataset to check (float = fraction, int = num_batches)
limit_test_batches: How much of test dataset to check (float = fraction, int = num_batches)
limit_predict_batches: How much of prediction dataset to check (float = fraction, int = num_batches)
logger: Logger (or iterable collection of loggers) for experiment tracking.
log_gpu_memory: None, 'min_max', 'all'. Might slow performance
log_every_n_steps: How often to log within steps (defaults to every 50 steps).
            prepare_data_per_node: If True, each LOCAL_RANK=0 will call ``prepare_data``.
                Otherwise only NODE_RANK=0, LOCAL_RANK=0 will call ``prepare_data``.
process_position: orders the progress bar when running multiple models on same machine.
progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.
Ignored when a custom progress bar is passed to :paramref:`~Trainer.callbacks`. Default: None, means
a suitable value will be chosen based on the environment (terminal, Google COLAB, etc.).
profiler: To profile individual steps during training and assist in identifying bottlenecks.
overfit_batches: Overfit a fraction of training data (float) or a set number of batches (int).
plugins: Plugins allow modification of core behavior like ddp and amp, and enable custom lightning plugins.
precision: Full precision (32), half precision (16). Can be used on CPU, GPU or TPUs.
max_epochs: Stop training once this number of epochs is reached. Disabled by default (None).
If both max_epochs and max_steps are not specified, defaults to ``max_epochs`` = 1000.
            min_epochs: Force training for at least this many epochs. Disabled by default (None).
If both min_epochs and min_steps are not specified, defaults to ``min_epochs`` = 1.
max_steps: Stop training after this number of steps. Disabled by default (None).
            min_steps: Force training for at least this number of steps. Disabled by default (None).
num_nodes: number of GPU nodes for distributed training.
num_processes: number of processes for distributed training with distributed_backend="ddp_cpu"
num_sanity_val_steps: Sanity check runs n validation batches before starting the training routine.
Set it to `-1` to run all batches in all validation dataloaders.
reload_dataloaders_every_epoch: Set to True to reload dataloaders every epoch.
            replace_sampler_ddp: Explicitly enables or disables sampler replacement. If not specified, this
                is toggled automatically when DDP is used. By default it will add ``shuffle=True`` for
train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it,
you can set ``replace_sampler_ddp=False`` and add your own distributed sampler.
resume_from_checkpoint: Path/URL of the checkpoint from which training is resumed. If there is
no checkpoint file at the path, start from scratch. If resuming from mid-epoch checkpoint,
training will start from the beginning of the next epoch.
sync_batchnorm: Synchronize batch norm layers between process groups/whole world.
terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the
end of each training batch, if any of the parameters or the loss are NaN or +/-inf.
tpu_cores: How many TPU cores to train on (1 or 8) / Single TPU to train on [1]
            track_grad_norm: -1 for no tracking. Otherwise tracks that p-norm. May be set to 'inf' for the infinity norm.
            truncated_bptt_steps: Truncated backpropagation through time: performs backprop every k steps of a
                much longer sequence.
val_check_interval: How often to check the validation set. Use float to check within a training epoch,
use int to check every n steps (batches).
weights_summary: Prints a summary of the weights when training begins.
weights_save_path: Where to save weights if specified. Will override default_root_dir
for checkpoints only. Use this if for whatever reason you need the checkpoints
stored in a different place than the logs written in `default_root_dir`.
Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'
Defaults to `default_root_dir`.
move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu.
                This can save some gpu memory, but can make training slower. Use with caution.
multiple_trainloader_mode: How to loop over the datasets when there are multiple train loaders.
In 'max_size_cycle' mode, the trainer ends one epoch when the largest dataset is traversed,
and smaller datasets reload when running out of their data. In 'min_size' mode, all the datasets
reload when reaching the minimum length of datasets.
stochastic_weight_avg: Whether to use `Stochastic Weight Averaging (SWA)
                <https://pytorch.org/blog/pytorch-1.6-now-includes-stochastic-weight-averaging/>`_
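        Example (illustrative sketch; ``LitModel`` is a hypothetical user-defined LightningModule)::
            model = LitModel()
            # assumes a single-GPU machine; flags are the ones documented above
            trainer = Trainer(max_epochs=10, gpus=1, precision=16)
            trainer.fit(model)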
"""
super().__init__()
distributed_backend = distributed_backend or accelerator
# init connectors
self.dev_debugger = InternalDebugger(self)
self.config_validator = ConfigValidator(self)
self.data_connector = DataConnector(self)
self.optimizer_connector = OptimizerConnector(self)
self.accelerator_connector = AcceleratorConnector(
num_processes, tpu_cores, distributed_backend, auto_select_gpus, gpus, num_nodes, sync_batchnorm, benchmark,
replace_sampler_ddp, deterministic, precision, amp_backend, amp_level, plugins
)
self.logger_connector = LoggerConnector(self, log_gpu_memory)
self.model_connector = ModelConnector(self)
self.callback_connector = CallbackConnector(self)
self.debugging_connector = DebuggingConnector(self)
self.training_tricks_connector = TrainingTricksConnector(self)
self.profile_connector = ProfilerConnector(self)
self.checkpoint_connector = CheckpointConnector(self)
self.slurm_connector = SLURMConnector(self)
self.tuner = Tuner(self)
self.train_loop = TrainLoop(self, multiple_trainloader_mode)
self.evaluation_loop = EvaluationLoop(self)
self.predict_loop = PredictLoop(self)
# training state
self.weights_summary = weights_summary
self.shown_warnings = set()
# init callbacks
# Declare attributes to be set in callback_connector on_trainer_init
self.callback_connector.on_trainer_init(
callbacks, checkpoint_callback, progress_bar_refresh_rate, process_position, default_root_dir,
weights_save_path, resume_from_checkpoint, stochastic_weight_avg
)
# hook
self.on_init_start()
# init optimizer + lr scheduler related flags
self.optimizer_connector.on_trainer_init()
# init data flags
self.data_connector.on_trainer_init(
check_val_every_n_epoch, reload_dataloaders_every_epoch, prepare_data_per_node
)
# init training tricks
self.training_tricks_connector.on_trainer_init(
gradient_clip_val, track_grad_norm, accumulate_grad_batches, truncated_bptt_steps, terminate_on_nan
)
self.train_loop.on_trainer_init(
max_epochs,
min_epochs,
max_steps,
min_steps,
num_sanity_val_steps,
weights_summary,
)
self.evaluation_loop.on_trainer_init()
# configure tuner
self.tuner.on_trainer_init(auto_lr_find, auto_scale_batch_size)
# configure profiler
self.profile_connector.on_trainer_init(profiler)
# init logger flags
self.logger_connector.on_trainer_init(
logger,
flush_logs_every_n_steps,
log_every_n_steps,
move_metrics_to_cpu,
)
# init debugging flags
self.debugging_connector.on_init_start(
limit_train_batches,
limit_val_batches,
limit_test_batches,
limit_predict_batches,
val_check_interval,
overfit_batches,
fast_dev_run,
)
# Callback system
self.on_init_end()
def fit(
self,
model: LightningModule,
train_dataloader: Any = None,
val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,
datamodule: Optional[LightningDataModule] = None,
):
r"""
Runs the full optimization routine.
Args:
            datamodule: An instance of :class:`LightningDataModule`.
model: Model to fit.
train_dataloader: Either a single PyTorch DataLoader or a collection of these
(list, dict, nested lists and dicts). In the case of multiple dataloaders, please
see this :ref:`page <multiple-training-dataloaders>`
            val_dataloaders: Either a single PyTorch DataLoader or a list of them, specifying validation samples.
                If the model has a predefined val_dataloaders method, this will be skipped.
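        Example (illustrative sketch; ``LitModel``, ``train_loader`` and ``val_loader`` are hypothetical)::
            trainer = Trainer(max_epochs=1)
            trainer.fit(LitModel(), train_dataloader=train_loader, val_dataloaders=val_loader)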
"""
# we reuse fit for other functions. When already set, it shouldn't be modified.
if not self.state.running:
self.state = TrainerState.FITTING
if self._running_stage is None:
self.training = True
# set local properties on the model
self.model_connector.copy_trainer_model_properties(model)
# ----------------------------
# LINK DATA
# ----------------------------
# setup data, etc...
self.train_loop.setup_fit(model, train_dataloader, val_dataloaders, datamodule)
# hook
self.data_connector.prepare_data(model)
self.callback_connector._attach_model_callbacks(model, self)
# ----------------------------
# SET UP TRAINING
# ----------------------------
self.call_hook("on_before_accelerator_backend_setup", model)
self.accelerator.connect(model)
self.accelerator.setup_environment()
self.call_setup_hook(model) # allow user to setup lightning_module in accelerator environment
self.accelerator.setup(self, model) # note: this sets up self.lightning_module
# ----------------------------
# INSPECT THE CORE LOOPS
# ----------------------------
f"""
Lightning internal flow looks like this:
{Trainer.fit} or {Trainer.test} or {Trainer.predict} ||
| ||
create accelerator ||
| ||
{self.dispatch} ||
| || LIGHTNING
{self.accelerator.start_training} ||
or {self.accelerator.start_evaluating} ||
or {self.accelerator.start_predicting} || FLOW
| ||
{self.run_stage} ||
| || DIRECTION
{self.run_train} ||
or {self.run_evaluation} ||
or {self.run_predict} ||
| ||
results \/
This is used to guide readers to the core loops: train, test, predict.
{self.run_predict} is the simplest to understand, use `Go to Definition` to read it :)
Search for `start_training` or `start_evaluating` or `start_predicting` in
`pytorch_lightning/plugins/training_type_plugin` to find accelerator dispatch functions.
""" # noqa: W605
# ----------------------------
# TRAIN
# ----------------------------
# hook
if self.state == TrainerState.FITTING:
self.call_hook("on_fit_start")
# plugin will setup fitting (e.g. ddp will launch child processes)
self.pre_dispatch()
# dispatch `start_training` or `start_evaluating` or `start_predicting`
self.dispatch()
        # plugin will finalize fitting (e.g. ddp_spawn will load trained model)
self.post_dispatch()
# ----------------------------
# POST-Training CLEAN UP
# ----------------------------
# hook
if self.state == TrainerState.FITTING:
self.call_hook('on_fit_end')
# teardown
self.call_teardown_hook(model)
if self.state != TrainerState.INTERRUPTED:
self.state = TrainerState.FINISHED
self._running_stage = None
# return 1 when finished
# used for testing or when we need to know that training succeeded
return self.accelerator.results or 1
def pre_dispatch(self):
self.accelerator.pre_dispatch(self)
# log hyper-parameters
if self.logger is not None:
# save exp to get started (this is where the first experiment logs are written)
self.logger.log_hyperparams(self.lightning_module.hparams_initial)
self.logger.log_graph(self.lightning_module)
self.logger.save()
def post_dispatch(self):
self.accelerator.post_dispatch(self)
self.accelerator.teardown()
def dispatch(self):
if self.evaluating:
self.accelerator.start_evaluating(self)
elif self.predicting:
self.accelerator.start_predicting(self)
else:
self.accelerator.start_training(self)
def run_stage(self):
results = None
self.profile_connector.setup()
if self.evaluating:
results = self.run_evaluate()
elif self.predicting:
results = self.run_predict()
else:
self.run_train()
return results
def _pre_training_routine(self):
# wait for all to join if on distributed
self.accelerator.barrier("setup_training")
# register auto-resubmit when on SLURM
self.slurm_connector.register_slurm_signal_handlers()
# --------------------------
# Pre-train
# --------------------------
# on pretrain routine start
ref_model = self.lightning_module
self.on_pretrain_routine_start()
ref_model.on_pretrain_routine_start()
# print model summary
if self.is_global_zero and self.weights_summary is not None and not self.testing:
if self.weights_summary in ModelSummary.MODES:
ref_model.summarize(mode=self.weights_summary)
else:
raise MisconfigurationException("weights_summary can be None, " + ", ".join(ModelSummary.MODES))
# restore training and model before hpc is called
self.checkpoint_connector.restore_weights()
# on pretrain routine end
self.on_pretrain_routine_end()
ref_model.on_pretrain_routine_end()
def run_train(self) -> None:
self._pre_training_routine()
if not self.is_global_zero and self.progress_bar_callback is not None:
self.progress_bar_callback.disable()
self.run_sanity_check(self.lightning_module)
self.checkpoint_connector.has_trained = False
# enable train mode
model = self.lightning_module
model.train()
torch.set_grad_enabled(True)
# reload data when needed
self.train_loop.reset_train_val_dataloaders(model)
# hook
self.train_loop.on_train_start()
try:
if self.train_loop.should_skip_training():
return
# run all epochs
epochs = range(self.current_epoch, self.max_epochs) if self.max_epochs else count(self.current_epoch)
for epoch in epochs:
# hook
self.train_loop.on_train_epoch_start(epoch)
with self.profiler.profile("run_training_epoch"):
# run train epoch
self.train_loop.run_training_epoch()
if self.max_steps and self.max_steps <= self.global_step:
return
# early stopping
met_min_epochs = (epoch >= self.min_epochs - 1) if self.min_epochs else True
met_min_steps = self.global_step >= self.min_steps if self.min_steps else True
if self.should_stop:
if met_min_epochs and met_min_steps:
return
else:
log.info(
'Trainer was signaled to stop but required minimum epochs'
f' ({self.min_epochs}) or minimum steps ({self.min_steps}) has'
' not been met. Training will continue...'
)
# hook
self.train_loop.on_train_end()
except KeyboardInterrupt:
rank_zero_warn('Detected KeyboardInterrupt, attempting graceful shutdown...')
# user could press Ctrl+c many times... only shutdown once
if not self.interrupted:
self.state = TrainerState.INTERRUPTED
self.on_keyboard_interrupt()
except (RuntimeError, AssertionError):
# if an exception is raised, the finally block is executed and can hide the actual exception
# that was initially raised if `on_train_end` also raises an exception. we want to avoid that
# for assertions and other runtime errors so we aren't misled while debugging
print_exc()
finally:
# hook
self.train_loop.on_train_end()
def run_evaluation(self, on_epoch=False):
if not (self.evaluating or self.sanity_checking):
rank_zero_warn(
f"`trainer.run_evaluation()` was called but the running stage is set to {self._running_stage}."
" This should not happen normally. Setting it to `RunningStage.VALIDATING`", RuntimeWarning
)
self.validating = True
# reset cached results
self.logger_connector.reset()
# prepare dataloaders
dataloaders, max_batches = self.evaluation_loop.get_evaluation_dataloaders()
# check if we want to skip this evaluation
if self.evaluation_loop.should_skip_evaluation(max_batches):
return [], []
# enable eval mode + no grads
self.evaluation_loop.on_evaluation_model_eval()
# ref model
model = self.lightning_module
model.zero_grad()
torch.set_grad_enabled(False)
# hook
self.evaluation_loop.on_evaluation_start()
# set up the eval loop
self.evaluation_loop.setup(model, max_batches, dataloaders)
# hook
self.evaluation_loop.on_evaluation_epoch_start()
# run validation/testing
for dataloader_idx, dataloader in enumerate(dataloaders):
# bookkeeping
dl_outputs = []
dataloader = self.accelerator.process_dataloader(dataloader)
dl_max_batches = self.evaluation_loop.max_batches[dataloader_idx]
for batch_idx, batch in enumerate(dataloader):
if batch is None:
continue
# stop short when running on limited batches
if batch_idx >= dl_max_batches:
break
# hook
self.evaluation_loop.on_evaluation_batch_start(batch, batch_idx, dataloader_idx)
# lightning module methods
with self.profiler.profile("evaluation_step_and_end"):
output = self.evaluation_loop.evaluation_step(batch, batch_idx, dataloader_idx)
output = self.evaluation_loop.evaluation_step_end(output)
# hook + store predictions
self.evaluation_loop.on_evaluation_batch_end(output, batch, batch_idx, dataloader_idx)
# log batch metrics
self.evaluation_loop.log_evaluation_step_metrics(output, batch_idx)
# track epoch level outputs
dl_outputs = self.track_output_for_epoch_end(dl_outputs, output)
# store batch level output per dataloader
self.evaluation_loop.outputs.append(dl_outputs)
# lightning module method
deprecated_eval_results = self.evaluation_loop.evaluation_epoch_end()
# hook
self.evaluation_loop.on_evaluation_epoch_end()
# update epoch-level lr_schedulers
if on_epoch:
self.optimizer_connector.update_learning_rates(interval='epoch')
# hook
self.evaluation_loop.on_evaluation_end()
# log epoch metrics
eval_loop_results = self.evaluation_loop.log_epoch_metrics_on_evaluation_end()
# save predictions to disk
self.evaluation_loop.predictions.to_disk()
# enable train mode again
self.evaluation_loop.on_evaluation_model_train()
torch.set_grad_enabled(True)
return eval_loop_results, deprecated_eval_results
def track_output_for_epoch_end(self, outputs, output):
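        # Detach the step output (and optionally move it to CPU when ``move_metrics_to_cpu`` is set)
        # before appending it to ``outputs`` for epoch-end aggregation.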
if output is not None:
if isinstance(output, Result):
output = output.detach()
if self.move_metrics_to_cpu:
output = output.cpu()
elif isinstance(output, dict):
output = recursive_detach(output, to_cpu=self.move_metrics_to_cpu)
elif isinstance(output, torch.Tensor) and output.is_cuda and self.move_metrics_to_cpu:
output = output.cpu()
outputs.append(output)
return outputs
def run_evaluate(self):
if not self.is_global_zero and self.progress_bar_callback is not None:
self.progress_bar_callback.disable()
assert self.evaluating
with self.profiler.profile(f"run_{self._running_stage}_evaluation"):
eval_loop_results, _ = self.run_evaluation()
if len(eval_loop_results) == 0:
return 1
# remove the tensors from the eval results
for i, result in enumerate(eval_loop_results):
if isinstance(result, dict):
for k, v in result.items():
if isinstance(v, torch.Tensor):
result[k] = v.cpu().item()
return eval_loop_results
def run_predict(self):
# prepare dataloaders
dataloaders, max_batches = self.predict_loop.get_predict_dataloaders()
        # check if we want to skip this prediction run
if self.predict_loop.should_skip_predict(max_batches):
return []
# ref model
model = self.lightning_module
# enable eval mode + no grads
self.predict_loop.on_predict_model_eval()
model.zero_grad()
torch.set_grad_enabled(False)
# set up the eval loop
self.predict_loop.setup(model, max_batches, dataloaders)
        # run prediction
for dataloader_idx, dataloader in enumerate(dataloaders):
dataloader = self.accelerator.process_dataloader(dataloader)
dl_max_batches = self.predict_loop.max_batches[dataloader_idx]
for batch_idx, batch in enumerate(dataloader):
if batch is None:
continue
# stop short when running on limited batches
if batch_idx >= dl_max_batches:
break
# lightning module methods
with self.profiler.profile("predict"):
self.predict_loop.predict(batch, batch_idx, dataloader_idx)
results = self.predict_loop.on_predict_epoch_end()
return results
def run_sanity_check(self, ref_model):
using_val_step = ref_model.val_dataloader is not None and is_overridden('validation_step', ref_model)
should_sanity_check = using_val_step and self.num_sanity_val_steps > 0 and self.limit_val_batches > 0
# run tiny validation (if validation defined)
# to make sure program won't crash during val
if should_sanity_check:
stage = self._running_stage
self.sanity_checking = True
# hook and callback
self.on_sanity_check_start()
# run eval step
_, eval_results = self.run_evaluation()
# allow no returns from eval
if eval_results is not None and len(eval_results) > 0:
                # when we get a list back, use only the last item
if isinstance(eval_results, list):
eval_results = eval_results[-1]
_, _, _, callback_metrics, _ = self.process_dict_result(eval_results)
self.logger_connector.callback_metrics = callback_metrics
self.on_sanity_check_end()
self._running_stage = stage
def validate(
self,
model: Optional[LightningModule] = None,
val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,
ckpt_path: Optional[str] = 'best',
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
):
r"""
Perform one evaluation epoch over the validation set.
Args:
model: The model to validate.
val_dataloaders: Either a single PyTorch DataLoader or a list of them,
specifying validation samples.
ckpt_path: Either ``best`` or path to the checkpoint you wish to validate.
If ``None``, use the current weights of the model.
When the model is given as argument, this parameter will not apply.
verbose: If True, prints the validation results.
            datamodule: An instance of :class:`LightningDataModule`.
Returns:
The dictionary with final validation results returned by validation_epoch_end.
If validation_epoch_end is not defined, the output is a list of the dictionaries
returned by validation_step.
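        Example (illustrative sketch; ``model`` and ``dm`` are hypothetical)::
            # validate the current weights of ``model`` on the datamodule's validation split
            results = trainer.validate(model, datamodule=dm)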
"""
# --------------------
# SETUP HOOK
# --------------------
self.verbose_evaluate = verbose
self.state = TrainerState.VALIDATING
self.validating = True
# If you supply a datamodule you can't supply val_dataloaders
if val_dataloaders and datamodule:
raise MisconfigurationException(
'You cannot pass both `trainer.validate(val_dataloaders=..., datamodule=...)`'
)
model_provided = model is not None
model = model or self.lightning_module
# Attach datamodule to get setup/prepare_data added to model before the call to it below
self.data_connector.attach_datamodule(model, datamodule)
# Attach dataloaders (if given)
self.data_connector.attach_dataloaders(model, val_dataloaders=val_dataloaders)
if not model_provided:
self.validated_ckpt_path = self.__load_ckpt_weights(model, ckpt_path=ckpt_path)
# run validate
results = self.fit(model)
assert self.state.stopped
self.validating = False
return results
def test(
self,
model: Optional[LightningModule] = None,
test_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,
ckpt_path: Optional[str] = 'best',
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
):
r"""
Perform one evaluation epoch over the test set. It's separated from
fit to make sure you never run on your test set until you want to.
Args:
model: The model to test.
test_dataloaders: Either a single PyTorch DataLoader or a list of them,
specifying test samples.
ckpt_path: Either ``best`` or path to the checkpoint you wish to test.
If ``None``, use the current weights of the model.
When the model is given as argument, this parameter will not apply.
verbose: If True, prints the test results.
            datamodule: An instance of :class:`LightningDataModule`.
Returns:
Returns a list of dictionaries, one for each test dataloader containing their respective metrics.
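        Example (illustrative sketch; assumes ``fit`` was run with a ModelCheckpoint callback and the
        model defines a test dataloader)::
            trainer.fit(model)
            results = trainer.test(ckpt_path='best')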
"""
# --------------------
# SETUP HOOK
# --------------------
self.verbose_evaluate = verbose
self.state = TrainerState.TESTING
self.testing = True
# If you supply a datamodule you can't supply test_dataloaders
if test_dataloaders and datamodule:
raise MisconfigurationException('You cannot pass both `trainer.test(test_dataloaders=..., datamodule=...)`')
model_provided = model is not None
model = model or self.lightning_module
# Attach datamodule to get setup/prepare_data added to model before the call to it below
self.data_connector.attach_datamodule(model, datamodule)
# Attach dataloaders (if given)
self.data_connector.attach_dataloaders(model, test_dataloaders=test_dataloaders)
if not model_provided:
self.tested_ckpt_path = self.__load_ckpt_weights(model, ckpt_path=ckpt_path)
# run test
results = self.fit(model)
assert self.state.stopped
self.testing = False
return results
def __load_ckpt_weights(
self,
model,
ckpt_path: Optional[str] = None,
) -> Optional[str]:
# if user requests the best checkpoint but we don't have it, error
if ckpt_path == 'best' and not self.checkpoint_callback.best_model_path:
raise MisconfigurationException(
'ckpt_path is "best", but `ModelCheckpoint` is not configured to save the best model.'
)
# load best weights
if ckpt_path is not None:
# ckpt_path is 'best' so load the best model
if ckpt_path == 'best':
ckpt_path = self.checkpoint_callback.best_model_path
if not ckpt_path:
fn = self.state.value
raise MisconfigurationException(
f'`.{fn}()` found no path for the best weights: "{ckpt_path}". Please'
                    f' specify a path for a checkpoint `.{fn}(ckpt_path=PATH)`'
)
self.training_type_plugin.barrier()
ckpt = pl_load(ckpt_path, map_location=lambda storage, loc: storage)
model.load_state_dict(ckpt['state_dict'])
return ckpt_path
def predict(
self,
model: Optional[LightningModule] = None,
dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,
datamodule: Optional[LightningDataModule] = None,
):
r"""
        Separated from fit to make sure you never run on your predictions set until you want to.
This will call the model forward function to compute predictions.
Args:
model: The model to predict on.
dataloaders: Either a single
Pytorch Dataloader or a list of them, specifying inference samples.
            datamodule: An instance of :class:`LightningDataModule`.
Returns:
Returns a list of dictionaries, one for each provided dataloader containing their respective predictions.
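        Example (illustrative sketch; ``predict_loader`` is a hypothetical DataLoader of inference samples)::
            predictions = trainer.predict(model, dataloaders=predict_loader)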
"""
# --------------------
# SETUP HOOK
# --------------------
# If you supply a datamodule you can't supply dataloaders
model = model or self.lightning_module
self.state = TrainerState.PREDICTING
self.predicting = True
if dataloaders and datamodule:
raise MisconfigurationException(
'You cannot pass dataloaders to trainer.predict if you supply a datamodule.'
)
# Attach datamodule to get setup/prepare_data added to model before the call to it below
self.data_connector.attach_datamodule(model, datamodule)
# Attach dataloaders (if given)
self.data_connector.attach_dataloaders(model, predict_dataloaders=dataloaders)
results = self.fit(model)
assert self.state.stopped
self.predicting = False
return results
def tune(
self,
model: LightningModule,
train_dataloader: Optional[DataLoader] = None,
val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,
datamodule: Optional[LightningDataModule] = None,
):
r"""
Runs routines to tune hyperparameters before training.
Args:
            datamodule: An instance of :class:`LightningDataModule`.
model: Model to tune.
train_dataloader: A Pytorch DataLoader with training samples. If the model has
a predefined train_dataloader method this will be skipped.
val_dataloaders: Either a single Pytorch Dataloader or a list of them, specifying validation samples.
If the model has a predefined val_dataloaders method this will be skipped
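        Example (illustrative sketch; tuning only acts on the flags enabled on the Trainer)::
            trainer = Trainer(auto_lr_find=True, auto_scale_batch_size='binsearch')
            trainer.tune(model)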
"""
self.state = TrainerState.TUNING
self.tuning = True
self.tuner.tune(model, train_dataloader, val_dataloaders, datamodule)
assert self.state.stopped
self.tuning = False
def call_setup_hook(self, model: LightningModule) -> None:
assert self.state.running, f"TrainerState: {self.state}"
state = self._setup_state
if self.datamodule is not None:
called = getattr(self.datamodule, f'has_setup_{state}')
if not called:
self.datamodule.setup(stage=state)
self.setup(model, stage=state)
model.setup(stage=state)
def call_teardown_hook(self, model: LightningModule) -> None:
state = self._teardown_state
self.profiler.teardown(stage=state)
self.teardown(stage=state)
model.teardown(stage=state)
def _reset_result_and_set_hook_fx_name(self, hook_name):
# on_before_zero_grad is called within training_step
if "batch_start" in hook_name or "on_before_zero_grad" in hook_name:
return True
model_ref = self.lightning_module
if model_ref is not None:
# used to track current hook name called
model_ref._results = Result()
model_ref._current_hook_fx_name = hook_name
return False
def _cache_logged_metrics(self):
model_ref = self.lightning_module
if model_ref is not None:
# capture logging for this hook
self.logger_connector.cache_logged_metrics()
def call_hook(self, hook_name, *args, **kwargs):
# set hook_name to model + reset Result obj
skip = self._reset_result_and_set_hook_fx_name(hook_name)
# always profile hooks
with self.profiler.profile(hook_name):
# first call trainer hook
if hasattr(self, hook_name):
trainer_hook = getattr(self, hook_name)
trainer_hook(*args, **kwargs)
# next call hook in lightningModule
output = None
model_ref = self.lightning_module
if is_overridden(hook_name, model_ref):
hook_fx = getattr(model_ref, hook_name)
output = hook_fx(*args, **kwargs)
# if the PL module doesn't have the hook then call the accelerator
# used to auto-reduce things for the user with Results obj
elif hasattr(self.accelerator, hook_name):
accelerator_hook = getattr(self.accelerator, hook_name)
output = accelerator_hook(*args, **kwargs)
if not skip:
self._cache_logged_metrics()
return output
| [
"torch.set_grad_enabled"
] | 1.4 | bmahlbrand/pytorch-lightning | 95e85e4d2d8f644b0ccc1f59d59634d6dd0f5d65 |
1.3 | from functools import partial
import pytest
import torch
from sklearn.metrics import (
accuracy_score as sk_accuracy,
jaccard_score as sk_jaccard_score,
precision_score as sk_precision,
recall_score as sk_recall,
roc_auc_score as sk_roc_auc_score,
)
from pytorch_lightning import seed_everything
from pytorch_lightning.metrics.functional.classification import (
stat_scores,
stat_scores_multiple_classes,
accuracy,
precision,
recall,
_binary_clf_curve,
dice_score,
auroc,
multiclass_auroc,
auc,
iou,
)
from pytorch_lightning.metrics.utils import to_onehot, get_num_classes, to_categorical
@pytest.mark.parametrize(['sklearn_metric', 'torch_metric', 'only_binary'], [
pytest.param(sk_accuracy, accuracy, False, id='accuracy'),
pytest.param(partial(sk_jaccard_score, average='macro'), iou, False, id='iou'),
pytest.param(partial(sk_precision, average='micro'), precision, False, id='precision'),
pytest.param(partial(sk_recall, average='micro'), recall, False, id='recall'),
pytest.param(sk_roc_auc_score, auroc, True, id='auroc')
])
def test_against_sklearn(sklearn_metric, torch_metric, only_binary):
"""Compare PL metrics to sklearn version. """
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# for metrics with only_binary=False, we try out different combinations of number
# of labels in pred and target (also test binary)
# for metrics with only_binary=True, target is always binary and pred will be
# (unnormalized) class probabilities
class_comb = [(5, 2)] if only_binary else [(10, 10), (5, 10), (10, 5), (2, 2)]
for n_cls_pred, n_cls_target in class_comb:
pred = torch.randint(n_cls_pred, (300,), device=device)
target = torch.randint(n_cls_target, (300,), device=device)
sk_score = sklearn_metric(target.cpu().detach().numpy(),
pred.cpu().detach().numpy())
pl_score = torch_metric(pred, target)
# if multi output
if isinstance(sk_score, tuple):
sk_score = [torch.tensor(sk_s.copy(), dtype=torch.float, device=device) for sk_s in sk_score]
for sk_s, pl_s in zip(sk_score, pl_score):
assert torch.allclose(sk_s, pl_s.float())
else:
sk_score = torch.tensor(sk_score, dtype=torch.float, device=device)
assert torch.allclose(sk_score, pl_score)
@pytest.mark.parametrize('class_reduction', ['micro', 'macro', 'weighted'])
@pytest.mark.parametrize(['sklearn_metric', 'torch_metric'], [
pytest.param(sk_precision, precision, id='precision'),
pytest.param(sk_recall, recall, id='recall'),
])
def test_different_reduction_against_sklearn(class_reduction, sklearn_metric, torch_metric):
""" Test metrics where the class_reduction parameter have a correponding
value in sklearn """
device = 'cuda' if torch.cuda.is_available() else 'cpu'
pred = torch.randint(10, (300,), device=device)
target = torch.randint(10, (300,), device=device)
sk_score = sklearn_metric(target.cpu().detach().numpy(),
pred.cpu().detach().numpy(),
average=class_reduction)
sk_score = torch.tensor(sk_score, dtype=torch.float, device=device)
pl_score = torch_metric(pred, target, class_reduction=class_reduction)
assert torch.allclose(sk_score, pl_score)
def test_onehot():
test_tensor = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
expected = torch.stack([
torch.cat([torch.eye(5, dtype=int), torch.zeros((5, 5), dtype=int)]),
torch.cat([torch.zeros((5, 5), dtype=int), torch.eye(5, dtype=int)])
])
assert test_tensor.shape == (2, 5)
assert expected.shape == (2, 10, 5)
onehot_classes = to_onehot(test_tensor, num_classes=10)
onehot_no_classes = to_onehot(test_tensor)
assert torch.allclose(onehot_classes, onehot_no_classes)
assert onehot_classes.shape == expected.shape
assert onehot_no_classes.shape == expected.shape
assert torch.allclose(expected.to(onehot_no_classes), onehot_no_classes)
assert torch.allclose(expected.to(onehot_classes), onehot_classes)
def test_to_categorical():
test_tensor = torch.stack([
torch.cat([torch.eye(5, dtype=int), torch.zeros((5, 5), dtype=int)]),
torch.cat([torch.zeros((5, 5), dtype=int), torch.eye(5, dtype=int)])
]).to(torch.float)
expected = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
assert expected.shape == (2, 5)
assert test_tensor.shape == (2, 10, 5)
result = to_categorical(test_tensor)
assert result.shape == expected.shape
assert torch.allclose(result, expected.to(result.dtype))
@pytest.mark.parametrize(['pred', 'target', 'num_classes', 'expected_num_classes'], [
pytest.param(torch.rand(32, 10, 28, 28), torch.randint(10, (32, 28, 28)), 10, 10),
pytest.param(torch.rand(32, 10, 28, 28), torch.randint(10, (32, 28, 28)), None, 10),
pytest.param(torch.rand(32, 28, 28), torch.randint(10, (32, 28, 28)), None, 10),
])
def test_get_num_classes(pred, target, num_classes, expected_num_classes):
assert get_num_classes(pred, target, num_classes) == expected_num_classes
@pytest.mark.parametrize(['pred', 'target', 'expected_tp', 'expected_fp',
'expected_tn', 'expected_fn', 'expected_support'], [
pytest.param(torch.tensor([0., 2., 4., 4.]), torch.tensor([0., 4., 3., 4.]), 1, 1, 1, 1, 2),
pytest.param(to_onehot(torch.tensor([0., 2., 4., 4.])), torch.tensor([0., 4., 3., 4.]), 1, 1, 1, 1, 2)
])
def test_stat_scores(pred, target, expected_tp, expected_fp, expected_tn, expected_fn, expected_support):
tp, fp, tn, fn, sup = stat_scores(pred, target, class_index=4)
assert tp.item() == expected_tp
assert fp.item() == expected_fp
assert tn.item() == expected_tn
assert fn.item() == expected_fn
assert sup.item() == expected_support
@pytest.mark.parametrize(['pred', 'target', 'reduction', 'expected_tp', 'expected_fp',
'expected_tn', 'expected_fn', 'expected_support'], [
pytest.param(torch.tensor([0., 2., 4., 4.]), torch.tensor([0., 4., 3., 4.]), 'none',
[1, 0, 0, 0, 1], [0, 0, 1, 0, 1], [3, 4, 3, 3, 1], [0, 0, 0, 1, 1], [1, 0, 0, 1, 2]),
pytest.param(to_onehot(torch.tensor([0., 2., 4., 4.])), torch.tensor([0., 4., 3., 4.]), 'none',
[1, 0, 0, 0, 1], [0, 0, 1, 0, 1], [3, 4, 3, 3, 1], [0, 0, 0, 1, 1], [1, 0, 0, 1, 2]),
pytest.param(to_onehot(torch.tensor([0., 2., 4., 4.])), torch.tensor([0., 4., 3., 4.]), 'sum',
torch.tensor(2), torch.tensor(2), torch.tensor(14), torch.tensor(2), torch.tensor(4)),
pytest.param(to_onehot(torch.tensor([0., 2., 4., 4.])), torch.tensor([0., 4., 3., 4.]), 'elementwise_mean',
torch.tensor(0.4), torch.tensor(0.4), torch.tensor(2.8), torch.tensor(0.4), torch.tensor(0.8))
])
def test_stat_scores_multiclass(pred, target, reduction, expected_tp, expected_fp, expected_tn, expected_fn, expected_support):
tp, fp, tn, fn, sup = stat_scores_multiple_classes(pred, target, reduction=reduction)
assert torch.allclose(torch.tensor(expected_tp).to(tp), tp)
assert torch.allclose(torch.tensor(expected_fp).to(fp), fp)
assert torch.allclose(torch.tensor(expected_tn).to(tn), tn)
assert torch.allclose(torch.tensor(expected_fn).to(fn), fn)
assert torch.allclose(torch.tensor(expected_support).to(sup), sup)
def test_multilabel_accuracy():
# Dense label indicator matrix format
y1 = torch.tensor([[0, 1, 1], [1, 0, 1]])
y2 = torch.tensor([[0, 0, 1], [1, 0, 1]])
assert torch.allclose(accuracy(y1, y2, class_reduction='none'), torch.tensor([2 / 3, 1.]))
assert torch.allclose(accuracy(y1, y1, class_reduction='none'), torch.tensor([1., 1.]))
assert torch.allclose(accuracy(y2, y2, class_reduction='none'), torch.tensor([1., 1.]))
assert torch.allclose(accuracy(y2, torch.logical_not(y2), class_reduction='none'), torch.tensor([0., 0.]))
assert torch.allclose(accuracy(y1, torch.logical_not(y1), class_reduction='none'), torch.tensor([0., 0.]))
    # if num_classes does not match the number extracted from the input, we expect a warning
with pytest.warns(RuntimeWarning,
match=r'You have set .* number of classes which is'
r' different from predicted (.*) and'
r' target (.*) number of classes'):
_ = accuracy(y2, torch.zeros_like(y2), num_classes=3)
def test_accuracy():
pred = torch.tensor([0, 1, 2, 3])
target = torch.tensor([0, 1, 2, 2])
acc = accuracy(pred, target)
assert acc.item() == 0.75
pred = torch.tensor([0, 1, 2, 2])
target = torch.tensor([0, 1, 1, 3])
acc = accuracy(pred, target)
assert acc.item() == 0.50
@pytest.mark.parametrize(['pred', 'target', 'expected_prec', 'expected_rec'], [
pytest.param(torch.tensor([1., 0., 1., 0.]), torch.tensor([0., 1., 1., 0.]), [0.5, 0.5], [0.5, 0.5]),
pytest.param(to_onehot(torch.tensor([1., 0., 1., 0.])), torch.tensor([0., 1., 1., 0.]), [0.5, 0.5], [0.5, 0.5])
])
def test_precision_recall(pred, target, expected_prec, expected_rec):
prec = precision(pred, target, class_reduction='none')
rec = recall(pred, target, class_reduction='none')
assert torch.allclose(torch.tensor(expected_prec).to(prec), prec)
assert torch.allclose(torch.tensor(expected_rec).to(rec), rec)
@pytest.mark.parametrize(['sample_weight', 'pos_label', "exp_shape"], [
pytest.param(1, 1., 42),
pytest.param(None, 1., 42),
])
def test_binary_clf_curve(sample_weight, pos_label, exp_shape):
    # TODO: move the pred and target back to test func arguments
    # if you fix the array inside the function, you'd also have to fix the expected shape,
    # because when the array changes, the shape changes as well
seed_everything(0)
pred = torch.randint(low=51, high=99, size=(100,), dtype=torch.float) / 100
target = torch.tensor([0, 1] * 50, dtype=torch.int)
if sample_weight is not None:
sample_weight = torch.ones_like(pred) * sample_weight
fps, tps, thresh = _binary_clf_curve(pred, target, sample_weight, pos_label)
assert isinstance(tps, torch.Tensor)
assert isinstance(fps, torch.Tensor)
assert isinstance(thresh, torch.Tensor)
assert tps.shape == (exp_shape,)
assert fps.shape == (exp_shape,)
assert thresh.shape == (exp_shape,)
@pytest.mark.parametrize(['pred', 'target', 'expected'], [
pytest.param([0, 1, 0, 1], [0, 1, 0, 1], 1.),
pytest.param([1, 1, 0, 0], [0, 0, 1, 1], 0.),
pytest.param([1, 1, 1, 1], [1, 1, 0, 0], 0.5),
pytest.param([1, 1, 0, 0], [1, 1, 0, 0], 1.),
pytest.param([0.5, 0.5, 0.5, 0.5], [1, 1, 0, 0], 0.5),
])
def test_auroc(pred, target, expected):
score = auroc(torch.tensor(pred), torch.tensor(target)).item()
assert score == expected
def test_multiclass_auroc():
with pytest.raises(ValueError,
match=r".*probabilities, i.e. they should sum up to 1.0 over classes"):
_ = multiclass_auroc(pred=torch.tensor([[0.9, 0.9],
[1.0, 0]]),
target=torch.tensor([0, 1]))
with pytest.raises(ValueError,
match=r".*not defined when all of the classes do not occur in the target.*"):
_ = multiclass_auroc(pred=torch.rand((4, 3)).softmax(dim=1),
target=torch.tensor([1, 0, 1, 0]))
with pytest.raises(ValueError,
match=r".*does not equal the number of classes passed in 'num_classes'.*"):
_ = multiclass_auroc(pred=torch.rand((5, 4)).softmax(dim=1),
target=torch.tensor([0, 1, 2, 2, 3]),
num_classes=6)
@pytest.mark.parametrize('n_cls', [2, 5, 10, 50])
def test_multiclass_auroc_against_sklearn(n_cls):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
n_samples = 300
pred = torch.rand(n_samples, n_cls, device=device).softmax(dim=1)
target = torch.randint(n_cls, (n_samples,), device=device)
# Make sure target includes all class labels so that multiclass AUROC is defined
target[10:10 + n_cls] = torch.arange(n_cls)
pl_score = multiclass_auroc(pred, target)
# For the binary case, sklearn expects an (n_samples,) array of probabilities of
# the positive class
pred = pred[:, 1] if n_cls == 2 else pred
sk_score = sk_roc_auc_score(target.cpu().detach().numpy(),
pred.cpu().detach().numpy(),
multi_class="ovr")
sk_score = torch.tensor(sk_score, dtype=torch.float, device=device)
assert torch.allclose(sk_score, pl_score)
@pytest.mark.parametrize(['x', 'y', 'expected'], [
pytest.param([0, 1], [0, 1], 0.5),
pytest.param([1, 0], [0, 1], 0.5),
pytest.param([1, 0, 0], [0, 1, 1], 0.5),
pytest.param([0, 1], [1, 1], 1),
pytest.param([0, 0.5, 1], [0, 0.5, 1], 0.5),
])
def test_auc(x, y, expected):
# Test Area Under Curve (AUC) computation
assert auc(torch.tensor(x), torch.tensor(y)) == expected
@pytest.mark.parametrize(['pred', 'target', 'expected'], [
pytest.param([[0, 0], [1, 1]], [[0, 0], [1, 1]], 1.),
pytest.param([[1, 1], [0, 0]], [[0, 0], [1, 1]], 0.),
pytest.param([[1, 1], [1, 1]], [[1, 1], [0, 0]], 2 / 3),
pytest.param([[1, 1], [0, 0]], [[1, 1], [0, 0]], 1.),
])
def test_dice_score(pred, target, expected):
score = dice_score(torch.tensor(pred), torch.tensor(target))
assert score == expected
@pytest.mark.parametrize(['half_ones', 'reduction', 'ignore_index', 'expected'], [
pytest.param(False, 'none', None, torch.Tensor([1, 1, 1])),
pytest.param(False, 'elementwise_mean', None, torch.Tensor([1])),
pytest.param(False, 'none', 0, torch.Tensor([1, 1])),
pytest.param(True, 'none', None, torch.Tensor([0.5, 0.5, 0.5])),
pytest.param(True, 'elementwise_mean', None, torch.Tensor([0.5])),
pytest.param(True, 'none', 0, torch.Tensor([0.5, 0.5])),
])
def test_iou(half_ones, reduction, ignore_index, expected):
pred = (torch.arange(120) % 3).view(-1, 1)
target = (torch.arange(120) % 3).view(-1, 1)
if half_ones:
pred[:60] = 1
iou_val = iou(
pred=pred,
target=target,
ignore_index=ignore_index,
reduction=reduction,
)
assert torch.allclose(iou_val, expected, atol=1e-9)
def test_iou_input_check():
with pytest.raises(ValueError, match=r"'pred' shape (.*) must equal 'target' shape (.*)"):
_ = iou(pred=torch.randint(0, 2, (3, 4, 3)),
target=torch.randint(0, 2, (3, 3)))
with pytest.raises(ValueError, match="'pred' must contain integer targets."):
_ = iou(pred=torch.rand((3, 3)),
target=torch.randint(0, 2, (3, 3)))
@pytest.mark.parametrize('metric', [auroc])
def test_error_on_multiclass_input(metric):
""" check that these metrics raise an error if they are used for multiclass problems """
pred = torch.randint(0, 10, (100, ))
target = torch.randint(0, 10, (100, ))
with pytest.raises(ValueError, match="AUROC metric is meant for binary classification"):
_ = metric(pred, target)
# TODO: When the jaccard_score of the sklearn version we use accepts `zero_division` (see
# https://github.com/scikit-learn/scikit-learn/pull/17866), consider adding a test here against our
# `absent_score`.
@pytest.mark.parametrize(['pred', 'target', 'ignore_index', 'absent_score', 'num_classes', 'expected'], [
# Note that -1 is used as the absent_score in almost all tests here to distinguish it from the range of valid
# scores the function can return ([0., 1.] range, inclusive).
# 2 classes, class 0 is correct everywhere, class 1 is absent.
pytest.param([0], [0], None, -1., 2, [1., -1.]),
pytest.param([0, 0], [0, 0], None, -1., 2, [1., -1.]),
# absent_score not applied if only class 0 is present and it's the only class.
pytest.param([0], [0], None, -1., 1, [1.]),
# 2 classes, class 1 is correct everywhere, class 0 is absent.
pytest.param([1], [1], None, -1., 2, [-1., 1.]),
pytest.param([1, 1], [1, 1], None, -1., 2, [-1., 1.]),
# When 0 index ignored, class 0 does not get a score (not even the absent_score).
pytest.param([1], [1], 0, -1., 2, [1.0]),
# 3 classes. Only 0 and 2 are present, and are perfectly predicted. 1 should get absent_score.
pytest.param([0, 2], [0, 2], None, -1., 3, [1., -1., 1.]),
pytest.param([2, 0], [2, 0], None, -1., 3, [1., -1., 1.]),
# 3 classes. Only 0 and 1 are present, and are perfectly predicted. 2 should get absent_score.
pytest.param([0, 1], [0, 1], None, -1., 3, [1., 1., -1.]),
pytest.param([1, 0], [1, 0], None, -1., 3, [1., 1., -1.]),
# 3 classes, class 0 is 0.5 IoU, class 1 is 0 IoU (in pred but not target; should not get absent_score), class
# 2 is absent.
pytest.param([0, 1], [0, 0], None, -1., 3, [0.5, 0., -1.]),
# 3 classes, class 0 is 0.5 IoU, class 1 is 0 IoU (in target but not pred; should not get absent_score), class
# 2 is absent.
pytest.param([0, 0], [0, 1], None, -1., 3, [0.5, 0., -1.]),
# Sanity checks with absent_score of 1.0.
pytest.param([0, 2], [0, 2], None, 1.0, 3, [1., 1., 1.]),
pytest.param([0, 2], [0, 2], 0, 1.0, 3, [1., 1.]),
])
def test_iou_absent_score(pred, target, ignore_index, absent_score, num_classes, expected):
iou_val = iou(
pred=torch.tensor(pred),
target=torch.tensor(target),
ignore_index=ignore_index,
absent_score=absent_score,
num_classes=num_classes,
reduction='none',
)
assert torch.allclose(iou_val, torch.tensor(expected).to(iou_val))
# example data taken from
# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/metrics/tests/test_ranking.py
@pytest.mark.parametrize(['pred', 'target', 'ignore_index', 'num_classes', 'reduction', 'expected'], [
# Ignoring an index outside of [0, num_classes-1] should have no effect.
pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], None, 3, 'none', [1, 1 / 2, 2 / 3]),
pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], -1, 3, 'none', [1, 1 / 2, 2 / 3]),
pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 255, 3, 'none', [1, 1 / 2, 2 / 3]),
# Ignoring a valid index drops only that index from the result.
pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 0, 3, 'none', [1 / 2, 2 / 3]),
pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 1, 3, 'none', [1, 2 / 3]),
pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 2, 3, 'none', [1, 1 / 2]),
# When reducing to mean or sum, the ignored index does not contribute to the output.
pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 0, 3, 'elementwise_mean', [7 / 12]),
pytest.param([0, 1, 1, 2, 2], [0, 1, 2, 2, 2], 0, 3, 'sum', [7 / 6]),
])
def test_iou_ignore_index(pred, target, ignore_index, num_classes, reduction, expected):
iou_val = iou(
pred=torch.tensor(pred),
target=torch.tensor(target),
ignore_index=ignore_index,
num_classes=num_classes,
reduction=reduction,
)
assert torch.allclose(iou_val, torch.tensor(expected).to(iou_val))
| [
"torch.zeros",
"torch.rand",
"torch.arange",
"torch.logical_not",
"torch.randint",
"torch.cuda.is_available",
"torch.tensor",
"torch.eye",
"torch.ones_like",
"torch.zeros_like",
"torch.allclose",
"torch.Tensor"
] | 1.3 | rwbfd/pytorch-lightning | f518ee6e25d1499f73cec86ca8b3f584d0fa440d |
1.8 | import torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from gpsa import VariationalGPSA, matern12_kernel, rbf_kernel, LossNotDecreasingChecker
from gpsa.plotting import callback_twod
sys.path.append("../../data")
from simulated.generate_twod_data import generate_twod_data
import matplotlib.animation as animation
import matplotlib.image as mpimg
import os
from os.path import join as pjoin
import anndata
import matplotlib
font = {"size": 25}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
device = "cuda" if torch.cuda.is_available() else "cpu"
LATEX_FONTSIZE = 35
n_spatial_dims = 2
n_views = 2
m_G = 50
m_X_per_view = 50
N_EPOCHS = 3000
PRINT_EVERY = 100
def two_d_gpsa(
n_outputs,
n_epochs,
n_latent_gps,
warp_kernel_variance=0.1,
noise_variance=0.0,
plot_intermediate=True,
fixed_view_data=None,
fixed_view_idx=None,
):
x = torch.from_numpy(X).float().clone()
y = torch.from_numpy(Y).float().clone()
data_dict = {
"expression": {
"spatial_coords": x,
"outputs": y,
"n_samples_list": n_samples_list,
}
}
model = VariationalGPSA(
data_dict,
n_spatial_dims=n_spatial_dims,
m_X_per_view=m_X_per_view,
m_G=m_G,
data_init=True,
minmax_init=False,
grid_init=False,
n_latent_gps=n_latent_gps,
mean_function="identity_fixed",
kernel_func_warp=rbf_kernel,
kernel_func_data=rbf_kernel,
fixed_view_idx=fixed_view_idx,
).to(device)
view_idx, Ns, _, _ = model.create_view_idx_dict(data_dict)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
def train(model, loss_fn, optimizer):
model.train()
# Forward pass
G_means, G_samples, F_latent_samples, F_samples = model.forward(
{"expression": x}, view_idx=view_idx, Ns=Ns, S=5
)
# Compute loss
loss = loss_fn(data_dict, F_samples)
# Compute gradients and take optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item()
loss_trace = []
error_trace = []
convergence_checker = LossNotDecreasingChecker(max_epochs=n_epochs, atol=1e-4)
for t in range(n_epochs):
loss = train(model, model.loss_fn, optimizer)
loss_trace.append(loss)
# has_converged = convergence_checker.check_loss(t, loss_trace)
# if has_converged:
# print("Convergence criterion met.")
# break
if t % PRINT_EVERY == 0:
print("Iter: {0:<10} LL {1:1.3e}".format(t, -loss))
G_means, G_samples, F_latent_samples, F_samples = model.forward(
{"expression": x}, view_idx=view_idx, Ns=Ns
)
print("Done!")
return G_means["expression"].detach().numpy()
if __name__ == "__main__":
## Generate data
n_outputs = 30
n_latent_gps = {"expression": 5}
warp_kernel_variance = 0.5
noise_variance = 0.001
fixed_view_data = 0
X, Y, n_samples_list, view_idx = generate_twod_data(
n_views,
n_outputs,
grid_size=10,
n_latent_gps=n_latent_gps["expression"],
kernel_lengthscale=5.0,
kernel_variance=warp_kernel_variance,
noise_variance=noise_variance,
fixed_view_idx=fixed_view_data,
)
n_samples_per_view = X.shape[0] // n_views
## Set up figure
plt.figure(figsize=(18, 5))
## Plot data
markers = ["o", "X"]
plt.subplot(131)
for vv in range(n_views):
plt.scatter(
X[view_idx[vv], 0],
X[view_idx[vv], 1],
c=Y[view_idx[vv], 0],
marker=markers[vv],
s=300,
linewidth=1.8,
edgecolor="black",
)
plt.title("Data")
plt.xlabel("Spatial 1")
plt.ylabel("Spatial 2")
## De novo
aligned_coords_denovo = two_d_gpsa(
n_epochs=N_EPOCHS,
n_outputs=n_outputs,
warp_kernel_variance=warp_kernel_variance,
noise_variance=noise_variance,
n_latent_gps=n_latent_gps,
fixed_view_idx=None,
)
plt.subplot(132)
for vv in range(n_views):
plt.scatter(
aligned_coords_denovo[view_idx[vv], 0],
aligned_coords_denovo[view_idx[vv], 1],
c=Y[view_idx[vv], 0],
marker=markers[vv],
s=300,
linewidth=1.8,
edgecolor="black",
)
plt.title(r"$\emph{De novo}$ alignment")
plt.xlabel("Spatial 1")
plt.ylabel("Spatial 2")
## Template-based
aligned_coords_template = two_d_gpsa(
n_epochs=N_EPOCHS,
n_outputs=n_outputs,
warp_kernel_variance=warp_kernel_variance,
noise_variance=noise_variance,
n_latent_gps=n_latent_gps,
fixed_view_idx=0,
)
plt.subplot(133)
for vv in range(n_views):
plt.scatter(
aligned_coords_template[view_idx[vv], 0],
aligned_coords_template[view_idx[vv], 1],
c=Y[view_idx[vv], 0],
marker=markers[vv],
s=300,
linewidth=1.8,
edgecolor="black",
label="Slice {}".format(vv + 1),
)
plt.title(r"$\emph{Template-based}$ alignment")
plt.xlabel("Spatial 1")
plt.ylabel("Spatial 2")
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.tight_layout()
plt.savefig("./out/two_d_denovo_vs_templatebased.png")
plt.show()
denovo_error = np.mean(
np.sum(
(aligned_coords_denovo[view_idx[0]] - aligned_coords_denovo[view_idx[1]])
** 2,
axis=1,
)
)
templatebased_error = np.mean(
np.sum(
(
aligned_coords_template[view_idx[0]]
- aligned_coords_template[view_idx[1]]
)
** 2,
axis=1,
)
)
original_error = np.mean(np.sum((X[view_idx[0]] - X[view_idx[1]]) ** 2, axis=1))
# De novo error: 0.000536963
# Template error: 0.007253051
# Observed data error: 0.7329880727046506
import ipdb
ipdb.set_trace()
| [
"torch.from_numpy",
"torch.cuda.is_available"
] | 1.8.0 | giovp/spatial-alignment | b03a6508ba581246a3f6367217b2f8df5dcd15d4 |
1.8 | import numpy as np
import pandas as pd
import numpy.random as npr
import torch
from scipy.special import xlogy
def rbf_kernel(
x1, x2, lengthscale_unconstrained, output_variance_unconstrained, diag=False
):
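    """Squared-exponential (RBF) covariance between ``x1`` and ``x2``.
    The lengthscale and output variance are passed on an unconstrained (log) scale and are
    exponentiated inside the function; with ``diag=True`` only element-wise differences are used.
    Illustrative usage (shapes chosen arbitrarily)::
        K = rbf_kernel(
            torch.randn(5, 2),
            torch.randn(7, 2),
            lengthscale_unconstrained=torch.tensor(0.0),      # lengthscale = exp(0) = 1
            output_variance_unconstrained=torch.tensor(0.0),  # output variance = 1
        )  # K has shape (5, 7)
    """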
lengthscale = torch.exp(lengthscale_unconstrained)
output_variance = torch.exp(output_variance_unconstrained)
if diag:
diffs = x1 - x2
else:
diffs = x1.unsqueeze(-2) - x2.unsqueeze(-3)
K = output_variance * torch.exp(
-0.5 * torch.sum(torch.square(diffs / lengthscale), dim=-1)
)
return K
def rbf_kernel_numpy(x, xp, kernel_params):
output_scale = np.exp(kernel_params[0])
lengthscales = np.exp(kernel_params[1:])
diffs = np.expand_dims(x / lengthscales, 1) - np.expand_dims(xp / lengthscales, 0)
return output_scale * np.exp(-0.5 * np.sum(diffs ** 2, axis=2))
def matern12_kernel(
x1, x2, lengthscale_unconstrained, output_variance_unconstrained, diag=False
):
lengthscale = torch.exp(lengthscale_unconstrained)
output_variance = torch.exp(output_variance_unconstrained)
if diag:
diffs = x1 - x2
else:
diffs = x1.unsqueeze(-2) - x2.unsqueeze(-3)
eps = 1e-10
dists = torch.sqrt(torch.sum(torch.square(diffs), dim=-1) + eps)
return output_variance * torch.exp(-0.5 * dists / lengthscale)
def matern32_kernel(
x1, x2, lengthscale_unconstrained, output_variance_unconstrained, diag=False
):
lengthscale = torch.exp(lengthscale_unconstrained)
output_variance = torch.exp(output_variance_unconstrained)
if diag:
diffs = x1 - x2
else:
diffs = x1.unsqueeze(-2) - x2.unsqueeze(-3)
eps = 1e-10
dists = torch.sqrt(torch.sum(torch.square(diffs), dim=-1) + eps)
inner_term = np.sqrt(3.0) * dists / lengthscale
K = output_variance * (1 + inner_term) * torch.exp(-inner_term)
return K
def polar_warp(X, r, theta):
return np.array([X[:, 0] + r * np.cos(theta), X[:, 1] + r * np.sin(theta)]).T
def get_st_coordinates(df):
"""
Extracts spatial coordinates from ST data with index in 'AxB' type format.
    Return: numpy array of coordinates (one [x, y] row per spot)
"""
coor = []
for spot in df.index:
coordinates = spot.split("x")
coordinates = [float(i) for i in coordinates]
coor.append(coordinates)
return np.array(coor)
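# Illustrative example (not part of the original source): for a dataframe indexed by spot
# names such as "12x24", get_st_coordinates returns the numeric grid coordinates as a
# numpy array. The helper below is a hypothetical sketch.
def _example_get_st_coordinates():
    df = pd.DataFrame({"gene": [1.0, 2.0]}, index=["12x24", "3x7"])
    coords = get_st_coordinates(df)
    assert coords.shape == (2, 2)  # one (x, y) row per spot
    return coords  # array([[12., 24.], [ 3.,  7.]])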
def compute_distance(X1, X2):
return np.mean(np.sqrt(np.sum((X1 - X2) ** 2, axis=1)))
def make_pinwheel(
radial_std, tangential_std, num_classes, num_per_class, rate, rs=npr.RandomState(0)
):
"""Based on code by Ryan P. Adams."""
rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)
features = rs.randn(num_classes * num_per_class, 2) * np.array(
[radial_std, tangential_std]
)
features[:, 0] += 1
labels = np.repeat(np.arange(num_classes), num_per_class)
angles = rads[labels] + rate * np.exp(features[:, 0])
rotations = np.stack(
[np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)]
)
rotations = np.reshape(rotations.T, (-1, 2, 2))
return np.einsum("ti,tij->tj", features, rotations)
class ConvergenceChecker(object):
def __init__(self, span, dtp="float64"):
self.span = span
x = np.arange(span, dtype=dtp)
x -= x.mean()
X = np.column_stack((np.ones(shape=x.shape), x, x ** 2, x ** 3))
self.U = np.linalg.svd(X, full_matrices=False)[0]
def smooth(self, y):
return self.U @ (self.U.T @ y)
def subset(self, y, idx=-1):
span = self.U.shape[0]
lo = idx - span + 1
if idx == -1:
return y[lo:]
else:
return y[lo : (idx + 1)]
def relative_change(self, y, idx=-1, smooth=True):
y = self.subset(y, idx=idx)
if smooth:
y = self.smooth(y)
prev = y[-2]
return (y[-1] - prev) / (0.1 + abs(prev))
def converged(self, y, tol=1e-4, **kwargs):
return abs(self.relative_change(y, **kwargs)) < tol
def relative_change_all(self, y, smooth=True):
n = len(y)
span = self.U.shape[0]
cc = np.tile([np.nan], n)
for i in range(span, n):
cc[i] = self.relative_change(y, idx=i, smooth=smooth)
return cc
def converged_all(self, y, tol=1e-4, smooth=True):
cc = self.relative_change_all(y, smooth=smooth)
return np.abs(cc) < tol
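# Illustrative usage (not part of the original source): ConvergenceChecker projects the
# last `span` loss values onto a cubic-polynomial basis (the precomputed SVD factor U)
# and reports the smoothed relative change. The sketch below assumes a noisy,
# geometrically decaying loss trace.
def _example_convergence_checker():
    rng = np.random.RandomState(0)
    loss_trace = 0.9 ** np.arange(200) + 1e-4 * rng.randn(200)
    checker = ConvergenceChecker(span=20)
    # True once the smoothed relative change over the last 20 losses falls below tol
    return checker.converged(loss_trace, tol=1e-3)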
# Function for computing size factors
def compute_size_factors(m):
# given matrix m with samples in the columns
# compute size factors
sz = np.sum(m.values, axis=0) # column sums (sum of counts in each cell)
lsz = np.log(sz)
# make geometric mean of sz be 1 for poisson
sz_poisson = np.exp(lsz - np.mean(lsz))
return sz_poisson
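# Worked check (illustrative, not from the original source): for column sums
# sz = [1, 10, 100], the log sizes are [0, ln 10, 2 ln 10] with mean ln 10, so the
# returned factors are [0.1, 1, 10], whose geometric mean is exactly 1.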
def poisson_deviance(X, sz):
LP = X.values / sz # recycling
# import ipdb; ipdb.set_trace()
LP[LP > 0] = np.log(LP[LP > 0]) # log transform nonzero elements only
# Transpose to make features in cols, observations in rows
X = X.T
ll_sat = np.sum(np.multiply(X, LP.T), axis=0)
feature_sums = np.sum(X, axis=0)
ll_null = feature_sums * np.log(feature_sums / np.sum(sz))
return 2 * (ll_sat - ll_null)
def deviance_feature_selection(X):
# Remove cells without any counts
X = X[np.sum(X, axis=1) > 0]
# Compute size factors
sz = compute_size_factors(X)
# Compute deviances
devs = poisson_deviance(X, sz)
# Get associated gene names
gene_names = X.index.values
assert gene_names.shape[0] == devs.values.shape[0]
return devs.values, gene_names
def deviance_residuals(x, theta, mu=None):
"""Computes deviance residuals for NB model with a fixed theta"""
if mu is None:
counts_sum0 = np.sum(x, axis=0, keepdims=True)
counts_sum1 = np.sum(x, axis=1, keepdims=True)
counts_sum = np.sum(x)
# get residuals
mu = counts_sum1 @ counts_sum0 / counts_sum
def remove_negatives(sqrt_term):
negatives_idx = sqrt_term < 0
if np.any(negatives_idx):
n_negatives = np.sum(negatives_idx)
print(
"Setting %u negative sqrt term values to 0 (%f%%)"
                % (n_negatives, 100 * n_negatives / np.prod(sqrt_term.shape))
)
sqrt_term[negatives_idx] = 0
if np.isinf(theta): ### POISSON
x_minus_mu = x - mu
sqrt_term = 2 * (
xlogy(x, x / mu) - x_minus_mu
) # xlogy(x,x/mu) computes xlog(x/mu) and returns 0 if x=0
remove_negatives(sqrt_term)
dev = np.sign(x_minus_mu) * np.sqrt(sqrt_term)
else: ### NEG BIN
x_plus_theta = x + theta
sqrt_term = 2 * (
xlogy(x, x / mu) - (x_plus_theta) * np.log(x_plus_theta / (mu + theta))
) # xlogy(x,x/mu) computes xlog(x/mu) and returns 0 if x=0
remove_negatives(sqrt_term)
dev = np.sign(x - mu) * np.sqrt(sqrt_term)
return dev
def pearson_residuals(counts, theta, clipping=True):
"""Computes analytical residuals for NB model with a fixed theta, clipping outlier residuals to sqrt(N)"""
counts_sum0 = np.sum(counts, axis=0, keepdims=True)
counts_sum1 = np.sum(counts, axis=1, keepdims=True)
counts_sum = np.sum(counts)
# get residuals
mu = counts_sum1 @ counts_sum0 / counts_sum
z = (counts - mu) / np.sqrt(mu + mu ** 2 / theta)
# clip to sqrt(n)
if clipping:
n = counts.shape[0]
z[z > np.sqrt(n)] = np.sqrt(n)
z[z < -np.sqrt(n)] = -np.sqrt(n)
return z
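# Illustrative usage (not part of the original source): pearson_residuals takes a raw
# count matrix (observations in rows is assumed here) and a fixed NB dispersion theta;
# with clipping enabled, residuals are truncated at +/- sqrt(n_rows). A hypothetical sketch:
def _example_pearson_residuals():
    rng = np.random.RandomState(0)
    counts = rng.poisson(5.0, size=(50, 10)).astype(float)
    z = pearson_residuals(counts, theta=100.0)
    assert z.shape == counts.shape
    assert np.all(np.abs(z) <= np.sqrt(counts.shape[0]) + 1e-8)
    return z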
class LossNotDecreasingChecker():
def __init__(self, max_epochs, atol=1e-2, window_size=10):
self.max_epochs = max_epochs
self.atol = atol
self.window_size = window_size
self.decrease_in_loss = np.zeros(max_epochs)
self.average_decrease_in_loss = np.zeros(max_epochs)
def check_loss(self, iternum, loss_trace):
if iternum >= 1:
self.decrease_in_loss[iternum] = loss_trace[iternum-1] - loss_trace[iternum]
if iternum >= self.window_size:
self.average_decrease_in_loss[iternum] = np.mean(self.decrease_in_loss[iternum - self.window_size + 1:iternum])
has_converged = (self.average_decrease_in_loss[iternum] < self.atol)
return has_converged
return False
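# Illustrative usage (not part of the original source): the checker flags convergence once
# the average per-iteration decrease of the loss over the last `window_size` steps drops
# below `atol`, mirroring how it is used in the training scripts that import this module.
def _example_loss_not_decreasing_checker():
    checker = LossNotDecreasingChecker(max_epochs=100, atol=1e-2, window_size=10)
    loss_trace = list(np.linspace(1.0, 0.999, 100))  # an essentially flat loss curve
    for t in range(100):
        if checker.check_loss(t, loss_trace):
            return t  # first iteration at which the criterion fires
    return None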
| [
"torch.exp",
"torch.square"
] | 1.8.0 | giovp/spatial-alignment | b03a6508ba581246a3f6367217b2f8df5dcd15d4 |
1.8 | import torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from gpsa import VariationalGPSA, LossNotDecreasingChecker
sys.path.append("../../data")
from simulated.generate_oned_data import (
generate_oned_data_affine_warp,
generate_oned_data_gp_warp,
)
from gpsa.plotting import callback_oned
device = "cuda" if torch.cuda.is_available() else "cpu"
LATEX_FONTSIZE = 30
n_spatial_dims = 1
n_views = 2
n_outputs = 50
n_samples_per_view = 100
m_G = 10
m_X_per_view = 10
N_EPOCHS = 10_000
PRINT_EVERY = 25
N_LATENT_GPS = {"expression": 1}
NOISE_VARIANCE = 0.01
X, Y, n_samples_list, view_idx = generate_oned_data_gp_warp(
n_views,
n_outputs,
n_samples_per_view,
noise_variance=NOISE_VARIANCE,
n_latent_gps=N_LATENT_GPS["expression"],
kernel_variance=0.25,
kernel_lengthscale=10.0,
)
x = torch.from_numpy(X).float().clone()
y = torch.from_numpy(Y).float().clone()
data_dict = {
"expression": {
"spatial_coords": x,
"outputs": y,
"n_samples_list": n_samples_list,
}
}
model = VariationalGPSA(
data_dict,
n_spatial_dims=n_spatial_dims,
m_X_per_view=m_X_per_view,
m_G=m_G,
data_init=True,
minmax_init=False,
grid_init=False,
n_latent_gps=N_LATENT_GPS,
mean_function="identity_fixed",
fixed_warp_kernel_variances=np.ones(n_views) * 0.1,
fixed_warp_kernel_lengthscales=np.ones(n_views) * 10,
).to(device)
view_idx, Ns, _, _ = model.create_view_idx_dict(data_dict)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-1)
def train(model, loss_fn, optimizer):
model.train()
# Forward pass
G_means, G_samples, F_latent_samples, F_samples = model.forward(
{"expression": x},
view_idx=view_idx,
Ns=Ns,
S=5,
)
# Compute loss
loss = loss_fn(data_dict, F_samples)
# Compute gradients and take optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item()
# Set up figure.
fig = plt.figure(figsize=(14, 7), facecolor="white")
data_expression_ax = fig.add_subplot(212, frameon=False)
latent_expression_ax = fig.add_subplot(211, frameon=False)
plt.show(block=False)
loss_trace = []
error_trace = []
convergence_checker = LossNotDecreasingChecker(max_epochs=N_EPOCHS, atol=1e-4)
for t in range(N_EPOCHS):
loss = train(model, model.loss_fn, optimizer)
loss_trace.append(loss)
has_converged = convergence_checker.check_loss(t, loss_trace)
if has_converged:
print("Convergence criterion met.")
break
if t % PRINT_EVERY == 0:
print("Iter: {0:<10} LL {1:1.3e}".format(t, -loss))
G_means, G_samples, F_latent_samples, F_samples = model.forward(
{"expression": x}, view_idx=view_idx, Ns=Ns, S=3
)
callback_oned(
model,
X,
Y=Y,
X_aligned=G_means,
data_expression_ax=data_expression_ax,
latent_expression_ax=latent_expression_ax,
)
err = np.mean(
(
G_means["expression"].detach().numpy().squeeze()[:n_samples_per_view]
- G_means["expression"].detach().numpy().squeeze()[n_samples_per_view:]
)
** 2
)
print("Error: {}".format(err))
        error_trace.append(err)
print("Done!")
plt.close()
G_means, G_samples, F_latent_samples, F_samples = model.forward(
{"expression": x}, view_idx=view_idx, Ns=Ns, S=3
)
err_unaligned = np.mean((X[:n_samples_per_view] - X[n_samples_per_view:]) ** 2)
err_aligned = np.mean(
(
G_means["expression"].detach().numpy().squeeze()[:n_samples_per_view]
- G_means["expression"].detach().numpy().squeeze()[n_samples_per_view:]
)
** 2
)
print("Pre-alignment error: {}".format(err_unaligned))
print("Post-alignment error: {}".format(err_aligned))
import matplotlib
font = {"size": LATEX_FONTSIZE}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
fig = plt.figure(figsize=(10, 10))
data_expression_ax = fig.add_subplot(211, frameon=False)
latent_expression_ax = fig.add_subplot(212, frameon=False)
callback_oned(
model,
X,
Y=Y,
X_aligned=G_means,
data_expression_ax=data_expression_ax,
latent_expression_ax=latent_expression_ax,
)
plt.tight_layout()
plt.savefig("../../plots/one_d_simulation.png")
plt.show()
import ipdb
ipdb.set_trace()
| [
"torch.from_numpy",
"torch.cuda.is_available"
] | 1.8.0 | giovp/spatial-alignment | b03a6508ba581246a3f6367217b2f8df5dcd15d4 |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Union
import torch
from monai.metrics.utils import do_metric_reduction
from monai.utils import MetricReduction
class RegressionMetric(ABC):
def __init__(self, reduction: Union[MetricReduction, str] = MetricReduction.MEAN) -> None:
super().__init__()
self.reduction = reduction
def _reduce(self, f: torch.Tensor):
return do_metric_reduction(f, self.reduction)
def _check_shape(self, y_pred: torch.Tensor, y: torch.Tensor) -> None:
if y_pred.shape != y.shape:
raise ValueError(
"y_pred and y shapes dont match, received y_pred: [{}] and y: [{}]".format(y_pred.shape, y.shape)
)
        # also check if there is at least one non-batch dimension i.e. num_dims >= 2
if len(y_pred.shape) < 2:
raise ValueError("either channel or spatial dimensions required, found only batch dimension")
@abstractmethod
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
def __call__(self, y_pred: torch.Tensor, y: torch.Tensor):
self._check_shape(y_pred, y)
out = self._compute_metric(y_pred, y)
y, not_nans = self._reduce(out)
return y, not_nans
class MSEMetric(RegressionMetric):
r"""Compute Mean Squared Error between two tensors using function:
.. math::
\operatorname {MSE}\left(Y, \hat{Y}\right) =\frac {1}{n}\sum _{i=1}^{n}\left(y_i-\hat{y_i} \right)^{2}.
More info: https://en.wikipedia.org/wiki/Mean_squared_error
Input `y_pred` (BCHW[D] where C is number of channels) is compared with ground truth `y` (BCHW[D]).
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
Args:
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
"""
def __init__(self, reduction: Union[MetricReduction, str] = MetricReduction.MEAN) -> None:
super().__init__(reduction=reduction)
self.sq_func = partial(torch.pow, exponent=2.0)
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
y_pred = y_pred.float()
y = y.float()
mse_out = compute_mean_error_metrics(y_pred, y, func=self.sq_func)
return mse_out
class MSEMetricTraining(RegressionMetric):
r"""Compute Mean Squared Error between two tensors using function:
.. math::
\operatorname {MSE}\left(Y, \hat{Y}\right) =\frac {1}{n}\sum _{i=1}^{n}\left(y_i-\hat{y_i} \right)^{2}.
More info: https://en.wikipedia.org/wiki/Mean_squared_error
Input `y_pred` (BCHW[D] where C is number of channels) is compared with ground truth `y` (BCHW[D]).
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
Args:
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
"""
def __init__(self, reduction: Union[MetricReduction, str] = MetricReduction.MEAN) -> None:
super().__init__(reduction=reduction)
self.sq_func = partial(torch.pow, exponent=2.0)
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
y_pred = y_pred.float()
y = y.float()
mse_out = compute_mean_error_metrics(y_pred, y, func=self.sq_func)
return 1 - (100 * mse_out)
class MAEMetric(RegressionMetric):
r"""Compute Mean Absolute Error between two tensors using function:
.. math::
\operatorname {MAE}\left(Y, \hat{Y}\right) =\frac {1}{n}\sum _{i=1}^{n}\left|y_i-\hat{y_i}\right|.
More info: https://en.wikipedia.org/wiki/Mean_absolute_error
Input `y_pred` (BCHW[D] where C is number of channels) is compared with ground truth `y` (BCHW[D]).
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
Args:
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
"""
def __init__(self, reduction: Union[MetricReduction, str] = MetricReduction.MEAN) -> None:
super().__init__(reduction=reduction)
self.abs_func = torch.abs
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
y_pred = y_pred.float()
y = y.float()
mae_out = compute_mean_error_metrics(y_pred, y, func=self.abs_func)
return mae_out
class MAEMetricTraining(RegressionMetric):
r"""Compute Mean Absolute Error between two tensors using function:
.. math::
\operatorname {MAE}\left(Y, \hat{Y}\right) =\frac {1}{n}\sum _{i=1}^{n}\left|y_i-\hat{y_i}\right|.
More info: https://en.wikipedia.org/wiki/Mean_absolute_error
Input `y_pred` (BCHW[D] where C is number of channels) is compared with ground truth `y` (BCHW[D]).
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
Args:
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
"""
def __init__(self, reduction: Union[MetricReduction, str] = MetricReduction.MEAN) -> None:
super().__init__(reduction=reduction)
self.abs_func = torch.abs
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
y_pred = y_pred.float()
y = y.float()
mae_out = compute_mean_error_metrics(y_pred, y, func=self.abs_func)
return 1 - (100 * mae_out)
class RMSEMetric(RegressionMetric):
r"""Compute Root Mean Squared Error between two tensors using function:
.. math::
\operatorname {RMSE}\left(Y, \hat{Y}\right) ={ \sqrt{ \frac {1}{n}\sum _{i=1}^{n}\left(y_i-\hat{y_i}\right)^2 } } \
= \sqrt {\operatorname{MSE}\left(Y, \hat{Y}\right)}.
More info: https://en.wikipedia.org/wiki/Root-mean-square_deviation
Input `y_pred` (BCHW[D] where C is number of channels) is compared with ground truth `y` (BCHW[D]).
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
Args:
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
"""
def __init__(self, reduction: Union[MetricReduction, str] = MetricReduction.MEAN) -> None:
super().__init__(reduction=reduction)
self.sq_func = partial(torch.pow, exponent=2.0)
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
y_pred = y_pred.float()
y = y.float()
mse_out = compute_mean_error_metrics(y_pred, y, func=self.sq_func)
rmse_out = torch.sqrt(mse_out)
return rmse_out
class PSNRMetric(RegressionMetric):
r"""Compute Peak Signal To Noise Ratio between two tensors using function:
.. math::
\operatorname{PSNR}\left(Y, \hat{Y}\right) = 20 \cdot \log_{10} \left({\mathit{MAX}}_Y\right) \
        -10 \cdot \log_{10}\left(\operatorname{MSE}\left(Y, \hat{Y}\right)\right)
More info: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
Help taken from:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py line 4139
Input `y_pred` (BCHW[D] where C is number of channels) is compared with ground truth `y` (BCHW[D]).
Both `y_pred` and `y` are expected to be real-valued, where `y_pred` is output from a regression model.
Args:
max_val: The dynamic range of the images/volumes (i.e., the difference between the
maximum and the minimum allowed values e.g. 255 for a uint8 image).
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
"""
def __init__(
self, max_val: Union[int, float], reduction: Union[MetricReduction, str] = MetricReduction.MEAN
) -> None:
super().__init__(reduction=reduction)
self.max_val = max_val
self.sq_func = partial(torch.pow, exponent=2.0)
def _compute_metric(self, y_pred: torch.Tensor, y: torch.Tensor) -> Any:
y_pred = y_pred.float()
y = y.float()
mse_out = compute_mean_error_metrics(y_pred, y, func=self.sq_func)
psnr_val = 20 * math.log10(self.max_val) - 10 * torch.log10(mse_out)
return psnr_val
def compute_mean_error_metrics(y_pred: torch.Tensor, y: torch.Tensor, func) -> torch.Tensor:
# reducing in only channel + spatial dimensions (not batch)
    # reduction of batch handled inside __call__() using do_metric_reduction() in respective calling class
flt = partial(torch.flatten, start_dim=1)
error_metric = torch.mean(flt(func(y - y_pred)), dim=-1, keepdim=True)
return error_metric
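# Illustrative usage (not part of the original MONAI module): each metric reduces over the
# channel/spatial dimensions per batch item inside _compute_metric, and the batch reduction
# requested via `reduction` is applied in __call__. A hypothetical sketch:
def _example_regression_metrics():
    y_pred = torch.zeros(2, 1, 4, 4)
    y = torch.ones(2, 1, 4, 4)
    mse, _ = MSEMetric(reduction="mean")(y_pred, y)    # tensor(1.)
    mae, _ = MAEMetric(reduction="mean")(y_pred, y)    # tensor(1.)
    rmse, _ = RMSEMetric(reduction="mean")(y_pred, y)  # tensor(1.)
    return mse, mae, rmse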
| [
"torch.sqrt",
"torch.log10"
] | 1.5 | davidiommi/MONAI | c470c1a67b33d7dbbce0f8b8c5ffdad84b76d60f |
1.8 | """
Model dependent data transforms that apply MRAugment to
training data before fed to the model.
Modified from https://github.com/facebookresearch/fastMRI/blob/master/fastmri/data/transforms.py
"""
from typing import Dict, Optional, Sequence, Tuple, Union
import fastmri
import numpy as np
import torch
from fastmri.data.subsample import MaskFunc
from fastmri.data.transforms import to_tensor, apply_mask
class VarNetDataTransform:
"""
Data Transformer for training VarNet models with added MRAugment data augmentation.
"""
def __init__(self, augmentor = None, mask_func: Optional[MaskFunc] = None, use_seed: bool = True):
"""
Args:
augmentor: DataAugmentor object that encompasses the MRAugment pipeline and
schedules the augmentation probability
mask_func: Optional; A function that can create a mask of
appropriate shape. Defaults to None.
use_seed: If True, this class computes a pseudo random number
generator seed from the filename. This ensures that the same
mask is used for all the slices of a given volume every time.
"""
self.mask_func = mask_func
self.use_seed = use_seed
if augmentor is not None:
self.use_augment = True
self.augmentor = augmentor
else:
self.use_augment = False
def __call__(
self,
kspace: np.ndarray,
mask: np.ndarray,
target: np.ndarray,
attrs: Dict,
fname: str,
slice_num: int,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, str, int, float, torch.Tensor]:
"""
Args:
kspace: Input k-space of shape (num_coils, rows, cols) for
multi-coil data.
mask: Mask from the test dataset.
target: Target image.
attrs: Acquisition related information stored in the HDF5 object.
fname: File name.
slice_num: Serial number of the slice.
Returns:
tuple containing:
masked_kspace: k-space after applying sampling mask.
mask: The applied sampling mask
target: The target image (if applicable).
fname: File name.
slice_num: The slice index.
max_value: Maximum image value.
crop_size: The size to crop the final image.
"""
# Make sure data types match
kspace = kspace.astype(np.complex64)
        if target is not None:
            # cast to a consistent dtype before tensor conversion; skipped when target is None
            target = target.astype(np.float32)
            target = to_tensor(target)
            max_value = attrs["max"]
else:
target = torch.tensor(0)
max_value = 0.0
kspace = to_tensor(kspace)
# Apply augmentations if needed
if self.use_augment:
if self.augmentor.schedule_p() > 0.0:
kspace, target = self.augmentor(kspace, target.shape)
# Add singleton channel dimension if singlecoil
if len(kspace.shape) == 3:
kspace.unsqueeze_(0)
assert len(kspace.shape) == 4
seed = None if not self.use_seed else tuple(map(ord, fname))
acq_start = attrs["padding_left"]
acq_end = attrs["padding_right"]
crop_size = torch.tensor([target.shape[0], target.shape[1]])
if self.mask_func:
masked_kspace, mask = apply_mask(
kspace, self.mask_func, seed, (acq_start, acq_end)
)
else:
masked_kspace = kspace
shape = np.array(kspace.shape)
num_cols = shape[-2]
shape[:-3] = 1
mask_shape = [1] * len(shape)
mask_shape[-2] = num_cols
mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
mask = mask.reshape(*mask_shape)
mask[:, :, :acq_start] = 0
mask[:, :, acq_end:] = 0
return (
masked_kspace,
mask.byte(),
target,
fname,
slice_num,
max_value,
crop_size,
)
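    # Note (illustrative, not from the original source): when no mask_func is supplied, the
    # test-set mask is reshaped to (1, 1, num_cols, 1) so it broadcasts against k-space of
    # shape (num_coils, rows, cols, 2), and the padding columns outside
    # [acq_start, acq_end) are zeroed before the mask is returned as a byte tensor.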
def seed_pipeline(self, seed):
"""
Sets random seed for the MRAugment pipeline. It is important to provide
different seed to different workers and across different GPUs to keep
the augmentations diverse.
For an example how to set it see worker_init in pl_modules/fastmri_data_module.py
"""
if self.use_augment:
if self.augmentor.aug_on:
                self.augmentor.augmentation_pipeline.rng.seed(seed)
| [
"torch.tensor"
] | 1.8.1 | z-fabian/MRAugment | 88dd0649f05b2dd43bf967e8b92eaf2d5daab42d |
1.5 | import copy
import datetime
import inspect
import logging
import os
import sys
import time
import warnings
from inspect import signature
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Type, Union, cast
import torch
from torch.optim.sgd import SGD
from torch.utils.data.dataset import ConcatDataset
from flair.nn import Model
try:
from apex import amp
except ImportError:
amp = None
import random
from torch.optim.lr_scheduler import OneCycleLR # type: ignore
import flair
import flair.nn
from flair.data import Corpus, Dictionary, MultiCorpus, _len_dataset
from flair.datasets import DataLoader
from flair.optim import ExpAnnealLR, LinearSchedulerWithWarmup
from flair.training_utils import (
AnnealOnPlateau,
WeightExtractor,
add_file_handler,
identify_dynamic_embeddings,
init_output_file,
log_line,
store_embeddings,
)
log = logging.getLogger("flair")
class ModelTrainer:
def __init__(
self,
model: flair.nn.Model,
corpus: Corpus,
):
"""
Initialize a model trainer
:param model: The model that you want to train. The model should inherit from flair.nn.Model # noqa: E501
:param corpus: The dataset used to train the model, should be of type Corpus
"""
self.model: flair.nn.Model = model
self.corpus: Corpus = corpus
@staticmethod
def check_for_and_delete_previous_best_models(base_path):
all_best_model_names = [filename for filename in os.listdir(base_path) if filename.startswith("best-model")]
if len(all_best_model_names) != 0:
warnings.warn(
"There should be no best model saved at epoch 1 except there "
"is a model from previous trainings"
" in your training folder. All previous best models will be deleted."
)
for single_model in all_best_model_names:
previous_best_path = os.path.join(base_path, single_model)
if os.path.exists(previous_best_path):
os.remove(previous_best_path)
def train(
self,
base_path: Union[Path, str],
learning_rate: float = 0.1,
mini_batch_size: int = 32,
        eval_batch_size: Optional[int] = None,
mini_batch_chunk_size: Optional[int] = None,
max_epochs: int = 100,
train_with_dev: bool = False,
train_with_test: bool = False,
monitor_train: bool = False,
monitor_test: bool = False,
main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
scheduler=AnnealOnPlateau,
anneal_factor: float = 0.5,
patience: int = 3,
min_learning_rate: float = 0.0001,
initial_extra_patience: int = 0,
optimizer: Union[torch.optim.Optimizer, Type[torch.optim.Optimizer]] = SGD,
cycle_momentum: bool = False,
warmup_fraction: float = 0.1,
embeddings_storage_mode: str = "cpu",
checkpoint: bool = False,
save_final_model: bool = True,
anneal_with_restarts: bool = False,
anneal_with_prestarts: bool = False,
anneal_against_dev_loss: bool = False,
batch_growth_annealing: bool = False,
shuffle: bool = True,
param_selection_mode: bool = False,
write_weights: bool = False,
num_workers: Optional[int] = None,
sampler=None,
use_amp: bool = False,
amp_opt_level: str = "O1",
eval_on_train_fraction: float = 0.0,
eval_on_train_shuffle: bool = False,
save_model_each_k_epochs: int = 0,
tensorboard_comment: str = "",
use_swa: bool = False,
use_final_model_for_eval: bool = False,
gold_label_dictionary_for_eval: Optional[Dictionary] = None,
create_file_logs: bool = True,
create_loss_file: bool = True,
epoch: int = 0,
use_tensorboard: bool = False,
tensorboard_log_dir=None,
metrics_for_tensorboard=[],
optimizer_state_dict: Optional[Dict[str, Any]] = None,
scheduler_state_dict: Optional[Dict[str, Any]] = None,
save_optimizer_state: bool = False,
**kwargs,
) -> dict:
"""
Trains any class that implements the flair.nn.Model interface.
:param base_path: Main path to which all output during training is logged and models are saved # noqa: E501
:param learning_rate: Initial learning rate (or max, if scheduler is OneCycleLR) # noqa: E501
:param mini_batch_size: Size of mini-batches during training
:param mini_batch_chunk_size: If mini-batches are larger than this number, they get broken down into chunks of this size for processing purposes # noqa: E501
:param max_epochs: Maximum number of epochs to train. Terminates training if this number is surpassed. # noqa: E501
:param scheduler: The learning rate scheduler to use
:param checkpoint: If True, a full checkpoint is saved at end of each epoch # noqa: E501
:param cycle_momentum: If scheduler is OneCycleLR, whether the scheduler should cycle also the momentum # noqa: E501
:param anneal_factor: The factor by which the learning rate is annealed
:param patience: Patience is the number of epochs with no improvement the Trainer waits # noqa: E501
until annealing the learning rate
:param min_learning_rate: If the learning rate falls below this threshold, training terminates # noqa: E501
:param warmup_fraction: Fraction of warmup steps if the scheduler is LinearSchedulerWithWarmup # noqa: E501
:param train_with_dev: If True, the data from dev split is added to the training data # noqa: E501
:param train_with_test: If True, the data from test split is added to the training data # noqa: E501
:param monitor_train: If True, training data is evaluated at end of each epoch
:param monitor_test: If True, test data is evaluated at end of each epoch
:param embeddings_storage_mode: One of 'none' (all embeddings are deleted and freshly recomputed), # noqa: E501
'cpu' (embeddings are stored on CPU) or 'gpu' (embeddings are stored on GPU)
:param save_final_model: If True, final model is saved
:param anneal_with_restarts: If True, the last best model is restored when annealing the learning rate # noqa: E501
:param shuffle: If True, data is shuffled during training
:param param_selection_mode: If True, testing is performed against dev data. Use this mode when doing # noqa: E501
parameter selection.
:param num_workers: Number of workers in your data loader.
:param sampler: You can pass a data sampler here for special sampling of data. # noqa: E501
:param eval_on_train_fraction: the fraction of train data to do the evaluation on, # noqa: E501
if 0. the evaluation is not performed on fraction of training data,
if 'dev' the size is determined from dev set size
:param eval_on_train_shuffle: if True the train data fraction is determined on the start of training # noqa: E501
and kept fixed during training, otherwise it's sampled at beginning of each epoch # noqa: E501
:param save_model_each_k_epochs: Each k epochs, a model state will be written out. If set to '5', a model will # noqa: E501
be saved each 5 epochs. Default is 0 which means no model saving.
:param main_evaluation_metric: Type of metric to use for best model tracking and learning rate scheduling (if dev data is available, otherwise loss will be used), currently only applicable for text_classification_model # noqa: E501
:param tensorboard_comment: Comment to use for tensorboard logging
:param create_file_logs: If True, the logs will also be stored in a file 'training.log' in the model folder # noqa: E501
:param create_loss_file: If True, the loss will be writen to a file 'loss.tsv' in the model folder # noqa: E501
:param optimizer: The optimizer to use (typically SGD or Adam)
:param epoch: The starting epoch (normally 0 but could be higher if you continue training model) # noqa: E501
:param use_tensorboard: If True, writes out tensorboard information
:param tensorboard_log_dir: Directory into which tensorboard log files will be written # noqa: E501
:param metrics_for_tensorboard: List of tuples that specify which metrics (in addition to the main_score) shall be plotted in tensorboard, could be [("macro avg", 'f1-score'), ("macro avg", 'precision')] for example # noqa: E501
:param kwargs: Other arguments for the Optimizer
:return:
"""
# create a model card for this model with Flair and PyTorch version
model_card: Dict[str, Any] = {
"flair_version": flair.__version__,
"pytorch_version": torch.__version__,
}
# also record Transformers version if library is loaded
try:
import transformers
model_card["transformers_version"] = transformers.__version__
except ImportError:
pass
# remember all parameters used in train() call
local_variables = locals()
training_parameters = {}
for parameter in signature(self.train).parameters:
training_parameters[parameter] = local_variables[parameter]
model_card["training_parameters"] = training_parameters
# add model card to model
self.model.model_card = model_card
assert self.corpus.train
if use_tensorboard:
try:
from torch.utils.tensorboard import SummaryWriter
if tensorboard_log_dir is not None and not os.path.exists(tensorboard_log_dir):
os.mkdir(tensorboard_log_dir)
writer = SummaryWriter(log_dir=tensorboard_log_dir, comment=tensorboard_comment)
log.info(f"tensorboard logging path is {tensorboard_log_dir}")
except ImportError:
log_line(log)
log.warning("ATTENTION! PyTorch >= 1.1.0 and pillow are required" "for TensorBoard support!")
log_line(log)
use_tensorboard = False
pass
if use_amp:
if sys.version_info < (3, 0):
raise RuntimeError("Apex currently only supports Python 3. Aborting.")
if amp is None:
raise RuntimeError(
"Failed to import apex. Please install apex from "
"https://www.github.com/nvidia/apex "
"to enable mixed-precision training."
)
if not eval_batch_size:
eval_batch_size = mini_batch_size
if mini_batch_chunk_size is None:
mini_batch_chunk_size = mini_batch_size
if learning_rate < min_learning_rate:
min_learning_rate = learning_rate / 10
initial_learning_rate = learning_rate
base_path = Path(base_path)
base_path.mkdir(exist_ok=True, parents=True)
self.check_for_and_delete_previous_best_models(base_path)
# determine what splits (train, dev, test) to evaluate and log
log_train = True if monitor_train else False
log_test = True if (not param_selection_mode and self.corpus.test and monitor_test) else False
log_dev = False if train_with_dev or not self.corpus.dev else True
log_train_part = True if (eval_on_train_fraction == "dev" or eval_on_train_fraction > 0.0) else False
if log_train_part:
train_part_size = (
_len_dataset(self.corpus.dev)
if eval_on_train_fraction == "dev"
else int(_len_dataset(self.corpus.train) * eval_on_train_fraction)
)
assert train_part_size > 0
if not eval_on_train_shuffle:
train_part_indices = list(range(train_part_size))
train_part = torch.utils.data.dataset.Subset(self.corpus.train, train_part_indices)
# prepare loss logging file and set up header
loss_txt = init_output_file(base_path, "loss.tsv") if create_loss_file else None
weight_extractor = WeightExtractor(base_path)
# if optimizer class is passed, instantiate:
if not isinstance(optimizer, torch.optim.Optimizer):
kwargs["lr"] = learning_rate
optimizer = optimizer(self.model.parameters(), **kwargs)
if use_swa:
import torchcontrib
optimizer = torchcontrib.optim.SWA(optimizer, swa_start=10, swa_freq=5, swa_lr=learning_rate)
if use_amp:
self.model, optimizer = amp.initialize(self.model, optimizer, opt_level=amp_opt_level)
optimizer = cast(torch.optim.Optimizer, optimizer)
# load existing optimizer state dictionary if it exists
if optimizer_state_dict:
optimizer.load_state_dict(optimizer_state_dict)
# minimize training loss if training with dev data, else maximize dev score
anneal_mode = "min" if train_with_dev or anneal_against_dev_loss else "max"
best_validation_score = 100000000000 if train_with_dev or anneal_against_dev_loss else 0.0
dataset_size = _len_dataset(self.corpus.train)
if train_with_dev:
dataset_size += _len_dataset(self.corpus.dev)
# if scheduler is passed as a class, instantiate
if inspect.isclass(scheduler):
if scheduler == OneCycleLR:
scheduler = OneCycleLR(
optimizer,
max_lr=learning_rate,
steps_per_epoch=dataset_size // mini_batch_size + 1,
epochs=max_epochs - epoch,
# if we load a checkpoint, we have already trained for epoch
pct_start=0.0,
cycle_momentum=cycle_momentum,
)
elif scheduler == LinearSchedulerWithWarmup:
steps_per_epoch = (dataset_size + mini_batch_size - 1) / mini_batch_size
num_train_steps = int(steps_per_epoch * max_epochs)
num_warmup_steps = int(num_train_steps * warmup_fraction)
scheduler = LinearSchedulerWithWarmup(
optimizer,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
)
else:
scheduler = scheduler(
optimizer,
factor=anneal_factor,
patience=patience,
initial_extra_patience=initial_extra_patience,
mode=anneal_mode,
verbose=True,
)
# load existing scheduler state dictionary if it exists
if scheduler_state_dict:
scheduler.load_state_dict(scheduler_state_dict)
# update optimizer and scheduler in model card
model_card["training_parameters"]["optimizer"] = optimizer
model_card["training_parameters"]["scheduler"] = scheduler
if isinstance(scheduler, OneCycleLR) and batch_growth_annealing:
raise ValueError("Batch growth with OneCycle policy is not implemented.")
train_data = self.corpus.train
# if training also uses dev/train data, include in training set
if train_with_dev or train_with_test:
parts = [self.corpus.train]
if train_with_dev and self.corpus.dev:
parts.append(self.corpus.dev)
if train_with_test and self.corpus.test:
parts.append(self.corpus.test)
train_data = ConcatDataset(parts)
# initialize sampler if provided
if sampler is not None:
# init with default values if only class is provided
if inspect.isclass(sampler):
sampler = sampler()
# set dataset to sample from
sampler.set_dataset(train_data)
shuffle = False
dev_score_history = []
dev_loss_history = []
train_loss_history = []
micro_batch_size = mini_batch_chunk_size
# this field stores the names of all dynamic embeddings in the model (determined after first forward pass)
dynamic_embeddings = None
# At any point you can hit Ctrl + C to break out of training early.
try:
if create_file_logs:
log_handler = add_file_handler(log, base_path / "training.log")
else:
log_handler = None
log_line(log)
log.info(f'Model: "{self.model}"')
log_line(log)
log.info(f'Corpus: "{self.corpus}"')
log_line(log)
log.info("Parameters:")
log.info(f' - learning_rate: "{learning_rate}"')
log.info(f' - mini_batch_size: "{mini_batch_size}"')
log.info(f' - patience: "{patience}"')
log.info(f' - anneal_factor: "{anneal_factor}"')
log.info(f' - max_epochs: "{max_epochs}"')
log.info(f' - shuffle: "{shuffle}"')
log.info(f' - train_with_dev: "{train_with_dev}"')
log.info(f' - batch_growth_annealing: "{batch_growth_annealing}"')
log_line(log)
log.info(f'Model training base path: "{base_path}"')
log_line(log)
log.info(f"Device: {flair.device}")
log_line(log)
log.info(f"Embeddings storage mode: {embeddings_storage_mode}")
previous_learning_rate = learning_rate
momentum = 0
for group in optimizer.param_groups:
if "momentum" in group:
momentum = group["momentum"]
for epoch in range(epoch + 1, max_epochs + 1):
log_line(log)
# update epoch in model card
model_card["training_parameters"]["epoch"] = epoch
if anneal_with_prestarts:
last_epoch_model_state_dict = copy.deepcopy(self.model.state_dict())
if eval_on_train_shuffle:
train_part_indices = list(range(_len_dataset(self.corpus.train)))
random.shuffle(train_part_indices)
train_part_indices = train_part_indices[:train_part_size]
train_part = torch.utils.data.dataset.Subset(self.corpus.train, train_part_indices)
# get new learning rate
for group in optimizer.param_groups:
learning_rate = group["lr"]
if learning_rate != previous_learning_rate and batch_growth_annealing:
mini_batch_size *= 2
# reload last best model if annealing with restarts is enabled
if (
(anneal_with_restarts or anneal_with_prestarts)
and learning_rate != previous_learning_rate
and os.path.exists(base_path / "best-model.pt")
):
if anneal_with_restarts:
log.info("resetting to best model")
self.model.load_state_dict(self.model.load(base_path / "best-model.pt").state_dict())
if anneal_with_prestarts:
log.info("resetting to pre-best model")
self.model.load_state_dict(self.model.load(base_path / "pre-best-model.pt").state_dict())
previous_learning_rate = learning_rate
if use_tensorboard:
writer.add_scalar("learning_rate", learning_rate, epoch)
# stop training if learning rate becomes too small
if (
not isinstance(scheduler, (OneCycleLR, LinearSchedulerWithWarmup))
and learning_rate < min_learning_rate
):
log_line(log)
log.info("learning rate too small - quitting training!")
log_line(log)
break
batch_loader = DataLoader(
train_data,
batch_size=mini_batch_size,
shuffle=shuffle if epoch > 1 else False, # never shuffle the first epoch
num_workers=0 if num_workers is None else num_workers,
sampler=sampler,
)
self.model.train()
train_loss: float = 0
seen_batches = 0
total_number_of_batches = len(batch_loader)
modulo = max(1, int(total_number_of_batches / 10))
# process mini-batches
batch_time = 0.0
average_over = 0
for batch_no, batch in enumerate(batch_loader):
start_time = time.time()
# zero the gradients on the model and optimizer
self.model.zero_grad()
optimizer.zero_grad()
# if necessary, make batch_steps
batch_steps = [batch]
if len(batch) > micro_batch_size:
batch_steps = [batch[x : x + micro_batch_size] for x in range(0, len(batch), micro_batch_size)]
# forward and backward for batch
for batch_step in batch_steps:
# forward pass
loss = self.model.forward_loss(batch_step)
if isinstance(loss, tuple):
average_over += loss[1]
loss = loss[0]
# Backward
if use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
train_loss += loss.item()
# identify dynamic embeddings (always deleted) on first sentence
if not dynamic_embeddings:
dynamic_embeddings = identify_dynamic_embeddings(batch[0])
# depending on memory mode, embeddings are moved to CPU, GPU or deleted
store_embeddings(batch, embeddings_storage_mode, dynamic_embeddings)
# do the optimizer step
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)
optimizer.step()
# do the scheduler step if one-cycle or linear decay
if isinstance(scheduler, (OneCycleLR, LinearSchedulerWithWarmup)):
scheduler.step()
# get new learning rate
for group in optimizer.param_groups:
learning_rate = group["lr"]
if "momentum" in group:
momentum = group["momentum"]
if "betas" in group:
momentum, _ = group["betas"]
seen_batches += 1
batch_time += time.time() - start_time
if seen_batches % modulo == 0:
momentum_info = f" - momentum: {momentum:.4f}" if cycle_momentum else ""
intermittent_loss = train_loss / average_over if average_over > 0 else train_loss / seen_batches
log.info(
f"epoch {epoch} - iter {seen_batches}/"
f"{total_number_of_batches} - loss "
f"{intermittent_loss:.8f} - samples/sec:"
f" {mini_batch_size * modulo / batch_time:.2f}"
f" - lr: {learning_rate:.6f}{momentum_info}"
)
batch_time = 0.0
iteration = epoch * total_number_of_batches + batch_no
if not param_selection_mode and write_weights:
weight_extractor.extract_weights(self.model.state_dict(), iteration)
if average_over != 0:
train_loss /= average_over
self.model.eval()
if save_model_each_k_epochs > 0 and epoch % save_model_each_k_epochs == 0:
print("saving model of current epoch")
model_name = "model_epoch_" + str(epoch) + ".pt"
self.model.save(base_path / model_name, checkpoint=save_optimizer_state)
log_line(log)
log.info(f"EPOCH {epoch} done: loss {train_loss:.4f}" f" - lr {learning_rate:.7f}")
if use_tensorboard:
writer.add_scalar("train_loss", train_loss, epoch)
# evaluate on train / dev / test split depending on training settings
result_line: str = ""
if log_train:
train_eval_result = self.model.evaluate(
self.corpus.train,
gold_label_type=self.model.label_type,
mini_batch_size=eval_batch_size,
num_workers=num_workers,
embedding_storage_mode=embeddings_storage_mode,
main_evaluation_metric=main_evaluation_metric,
gold_label_dictionary=gold_label_dictionary_for_eval,
)
result_line += f"\t{train_eval_result.log_line}"
# depending on memory mode, embeddings are moved to CPU, GPU or deleted
store_embeddings(self.corpus.train, embeddings_storage_mode, dynamic_embeddings)
if log_train_part:
train_part_eval_result = self.model.evaluate(
train_part,
gold_label_type=self.model.label_type,
mini_batch_size=eval_batch_size,
num_workers=num_workers,
embedding_storage_mode=embeddings_storage_mode,
main_evaluation_metric=main_evaluation_metric,
gold_label_dictionary=gold_label_dictionary_for_eval,
)
result_line += f"\t{train_part_eval_result.loss}" f"\t{train_part_eval_result.log_line}"
log.info(
f"TRAIN_SPLIT : loss {train_part_eval_result.loss}"
f" - {main_evaluation_metric[1]}"
f" ({main_evaluation_metric[0]})"
f" {round(train_part_eval_result.main_score, 4)}"
)
if use_tensorboard:
for (metric_class_avg_type, metric_type) in metrics_for_tensorboard:
writer.add_scalar(
f"train_{metric_class_avg_type}_{metric_type}",
train_part_eval_result.classification_report[metric_class_avg_type][metric_type],
epoch,
)
if log_dev:
assert self.corpus.dev
dev_eval_result = self.model.evaluate(
self.corpus.dev,
gold_label_type=self.model.label_type,
mini_batch_size=eval_batch_size,
num_workers=num_workers,
out_path=base_path / "dev.tsv",
embedding_storage_mode=embeddings_storage_mode,
main_evaluation_metric=main_evaluation_metric,
gold_label_dictionary=gold_label_dictionary_for_eval,
)
result_line += f"\t{dev_eval_result.loss}\t{dev_eval_result.log_line}"
log.info(
f"DEV : loss {dev_eval_result.loss}"
f" - {main_evaluation_metric[1]}"
f" ({main_evaluation_metric[0]})"
f" {round(dev_eval_result.main_score, 4)}"
)
# calculate scores using dev data if available
# append dev score to score history
dev_score_history.append(dev_eval_result.main_score)
dev_loss_history.append(dev_eval_result.loss)
dev_score = dev_eval_result.main_score
# depending on memory mode, embeddings are moved to CPU, GPU or deleted
store_embeddings(self.corpus.dev, embeddings_storage_mode, dynamic_embeddings)
if use_tensorboard:
writer.add_scalar("dev_loss", dev_eval_result.loss, epoch)
writer.add_scalar("dev_score", dev_eval_result.main_score, epoch)
for (
metric_class_avg_type,
metric_type,
) in metrics_for_tensorboard:
writer.add_scalar(
f"dev_{metric_class_avg_type}_{metric_type}",
dev_eval_result.classification_report[metric_class_avg_type][metric_type],
epoch,
)
if log_test:
assert self.corpus.test
test_eval_result = self.model.evaluate(
self.corpus.test,
gold_label_type=self.model.label_type,
mini_batch_size=eval_batch_size,
num_workers=num_workers,
out_path=base_path / "test.tsv",
embedding_storage_mode=embeddings_storage_mode,
main_evaluation_metric=main_evaluation_metric,
gold_label_dictionary=gold_label_dictionary_for_eval,
)
result_line += f"\t{test_eval_result.loss}\t{test_eval_result.log_line}"
log.info(
f"TEST : loss {test_eval_result.loss} -"
f" {main_evaluation_metric[1]}"
f" ({main_evaluation_metric[0]}) "
f" {round(test_eval_result.main_score, 4)}"
)
# depending on memory mode, embeddings are moved to CPU, GPU or deleted
store_embeddings(self.corpus.test, embeddings_storage_mode, dynamic_embeddings)
if use_tensorboard:
writer.add_scalar("test_loss", test_eval_result.loss, epoch)
writer.add_scalar("test_score", test_eval_result.main_score, epoch)
for (
metric_class_avg_type,
metric_type,
) in metrics_for_tensorboard:
writer.add_scalar(
f"test_{metric_class_avg_type}_{metric_type}",
test_eval_result.classification_report[metric_class_avg_type][metric_type],
epoch,
)
# determine if this is the best model or if we need to anneal
current_epoch_has_best_model_so_far = False
# default mode: anneal against dev score
if not train_with_dev and not anneal_against_dev_loss:
if dev_score > best_validation_score:
current_epoch_has_best_model_so_far = True
best_validation_score = dev_score
if isinstance(scheduler, AnnealOnPlateau):
scheduler.step(dev_score, dev_eval_result.loss)
# alternative: anneal against dev loss
if not train_with_dev and anneal_against_dev_loss:
if dev_eval_result.loss < best_validation_score:
current_epoch_has_best_model_so_far = True
best_validation_score = dev_eval_result.loss
if isinstance(scheduler, AnnealOnPlateau):
scheduler.step(dev_eval_result.loss)
# alternative: anneal against train loss
if train_with_dev:
if train_loss < best_validation_score:
current_epoch_has_best_model_so_far = True
best_validation_score = train_loss
if isinstance(scheduler, AnnealOnPlateau):
scheduler.step(train_loss)
train_loss_history.append(train_loss)
# determine bad epoch number
try:
bad_epochs = scheduler.num_bad_epochs
except AttributeError:
bad_epochs = 0
for group in optimizer.param_groups:
new_learning_rate = group["lr"]
if new_learning_rate != previous_learning_rate:
bad_epochs = patience + 1
if previous_learning_rate == initial_learning_rate:
bad_epochs += initial_extra_patience
# log bad epochs
log.info(f"BAD EPOCHS (no improvement): {bad_epochs}")
if loss_txt is not None:
# output log file
with open(loss_txt, "a") as f:
# make headers on first epoch
if epoch == 1:
f.write("EPOCH\tTIMESTAMP\tBAD_EPOCHS" "\tLEARNING_RATE\tTRAIN_LOSS")
if log_train:
f.write("\tTRAIN_" + "\tTRAIN_".join(train_eval_result.log_header.split("\t")))
if log_train_part:
f.write(
"\tTRAIN_PART_LOSS\tTRAIN_PART_"
+ "\tTRAIN_PART_".join(train_part_eval_result.log_header.split("\t"))
)
if log_dev:
f.write("\tDEV_LOSS\tDEV_" + "\tDEV_".join(dev_eval_result.log_header.split("\t")))
if log_test:
f.write("\tTEST_LOSS\tTEST_" + "\tTEST_".join(test_eval_result.log_header.split("\t")))
f.write(
f"\n{epoch}\t{datetime.datetime.now():%H:%M:%S}"
f"\t{bad_epochs}"
f"\t{learning_rate:.4f}\t{train_loss}"
)
f.write(result_line)
# if checkpoint is enabled, save model at each epoch
if checkpoint and not param_selection_mode:
self.model.save(base_path / "checkpoint.pt", checkpoint=True)
# Check whether to save best model
if (
(not train_with_dev or anneal_with_restarts or anneal_with_prestarts)
and not param_selection_mode
and current_epoch_has_best_model_so_far
and not use_final_model_for_eval
):
log.info("saving best model")
self.model.save(base_path / "best-model.pt", checkpoint=save_optimizer_state)
if anneal_with_prestarts:
current_state_dict = self.model.state_dict()
self.model.load_state_dict(last_epoch_model_state_dict)
self.model.save(base_path / "pre-best-model.pt")
self.model.load_state_dict(current_state_dict)
if use_swa:
import torchcontrib
cast(torchcontrib.optim.SWA, optimizer).swap_swa_sgd()
# if we do not use dev data for model selection, save final model
if save_final_model and not param_selection_mode:
self.model.save(base_path / "final-model.pt", checkpoint=save_optimizer_state)
except KeyboardInterrupt:
log_line(log)
log.info("Exiting from training early.")
if not param_selection_mode:
log.info("Saving model ...")
self.model.save(base_path / "final-model.pt", checkpoint=save_optimizer_state)
log.info("Done.")
except Exception:
if create_file_logs:
log_handler.close()
log.removeHandler(log_handler)
raise
finally:
if use_tensorboard:
writer.close()
# test best model if test data is present
if self.corpus.test and not train_with_test:
final_score = self.final_test(
base_path=base_path,
eval_mini_batch_size=eval_batch_size,
num_workers=num_workers,
main_evaluation_metric=main_evaluation_metric,
gold_label_dictionary_for_eval=gold_label_dictionary_for_eval,
)
else:
final_score = 0
log.info("Test data not provided setting final score to 0")
if create_file_logs:
log_handler.close()
log.removeHandler(log_handler)
return {
"test_score": final_score,
"dev_score_history": dev_score_history,
"train_loss_history": train_loss_history,
"dev_loss_history": dev_loss_history,
}
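    # Illustrative usage (not part of the original flair source); the corpus and the model
    # (here a hypothetical `tagger`) are assumed to be constructed elsewhere:
    #
    #     trainer = ModelTrainer(tagger, corpus)
    #     trainer.train(
    #         "resources/taggers/example",
    #         learning_rate=0.1,
    #         mini_batch_size=32,
    #         max_epochs=10,
    #     )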
def resume(
self,
model: Model,
**trainer_args,
):
assert model.model_card is not None
self.model = model
# recover all arguments that were used to train this model
args_used_to_train_model = model.model_card["training_parameters"]
# you can overwrite params with your own
for param in trainer_args:
args_used_to_train_model[param] = trainer_args[param]
if param == "optimizer" and "optimizer_state_dict" in args_used_to_train_model:
del args_used_to_train_model["optimizer_state_dict"]
if param == "scheduler" and "scheduler_state_dict" in args_used_to_train_model:
del args_used_to_train_model["scheduler_state_dict"]
# surface nested arguments
kwargs = args_used_to_train_model["kwargs"]
del args_used_to_train_model["kwargs"]
# resume training with these parameters
self.train(**args_used_to_train_model, **kwargs)
def fine_tune(
self,
base_path: Union[Path, str],
learning_rate: float = 5e-5,
max_epochs: int = 10,
optimizer=torch.optim.AdamW,
scheduler=LinearSchedulerWithWarmup,
warmup_fraction: float = 0.1,
mini_batch_size: int = 4,
embeddings_storage_mode: str = "none",
use_final_model_for_eval: bool = True,
**trainer_args,
):
return self.train(
base_path=base_path,
learning_rate=learning_rate,
max_epochs=max_epochs,
optimizer=optimizer,
scheduler=scheduler,
warmup_fraction=warmup_fraction,
mini_batch_size=mini_batch_size,
embeddings_storage_mode=embeddings_storage_mode,
use_final_model_for_eval=use_final_model_for_eval,
**trainer_args,
)
def final_test(
self,
base_path: Union[Path, str],
eval_mini_batch_size: int,
main_evaluation_metric: Tuple[str, str],
num_workers: Optional[int] = 8,
gold_label_dictionary_for_eval: Optional[Dictionary] = None,
):
base_path = Path(base_path)
base_path.mkdir(exist_ok=True, parents=True)
log_line(log)
self.model.eval()
if (base_path / "best-model.pt").exists():
self.model.load_state_dict(self.model.load(base_path / "best-model.pt").state_dict())
else:
log.info("Testing using last state of model ...")
assert self.corpus.test
test_results = self.model.evaluate(
self.corpus.test,
gold_label_type=self.model.label_type,
mini_batch_size=eval_mini_batch_size,
num_workers=num_workers,
out_path=base_path / "test.tsv",
embedding_storage_mode="none",
main_evaluation_metric=main_evaluation_metric,
gold_label_dictionary=gold_label_dictionary_for_eval,
)
log.info(test_results.log_line)
log.info(test_results.detailed_results)
log_line(log)
# if we are training over multiple datasets, do evaluation for each
if isinstance(self.corpus, MultiCorpus):
for subcorpus in self.corpus.corpora:
log_line(log)
if subcorpus.test:
subcorpus_results = self.model.evaluate(
subcorpus.test,
gold_label_type=self.model.label_type,
mini_batch_size=eval_mini_batch_size,
num_workers=num_workers,
out_path=base_path / f"{subcorpus.name}-test.tsv",
embedding_storage_mode="none",
main_evaluation_metric=main_evaluation_metric,
)
log.info(subcorpus.name)
log.info(subcorpus_results.log_line)
# get and return the final test score of best model
final_score = test_results.main_score
return final_score
def find_learning_rate(
self,
base_path: Union[Path, str],
optimizer,
mini_batch_size: int = 32,
start_learning_rate: float = 1e-7,
end_learning_rate: float = 10,
iterations: int = 1000,
stop_early: bool = True,
file_name: str = "learning_rate.tsv",
**kwargs,
) -> Path:
best_loss = None
# cast string to Path
base_path = Path(base_path)
base_path.mkdir(exist_ok=True, parents=True)
learning_rate_tsv = init_output_file(base_path, file_name)
with open(learning_rate_tsv, "a") as f:
f.write("ITERATION\tTIMESTAMP\tLEARNING_RATE\tTRAIN_LOSS\n")
optimizer = optimizer(self.model.parameters(), lr=start_learning_rate, **kwargs)
train_data = self.corpus.train
scheduler = ExpAnnealLR(optimizer, end_learning_rate, iterations)
model_state = self.model.state_dict()
self.model.train()
step = 0
loss_list = []
average_loss_list = []
while step < iterations:
batch_loader = DataLoader(train_data, batch_size=mini_batch_size, shuffle=True)
for batch in batch_loader:
step += 1
# forward pass
loss = self.model.forward_loss(batch)
if isinstance(loss, tuple):
loss = loss[0]
# update optimizer and scheduler
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.0)
optimizer.step()
scheduler.step()
learning_rate = scheduler.get_lr()[0]
# append current loss to list of losses for all iterations
loss_list.append(loss.item())
# compute averaged loss
import statistics
moving_avg_loss = statistics.mean(loss_list)
average_loss_list.append(moving_avg_loss)
if len(average_loss_list) > 10:
drop = average_loss_list[-10] - moving_avg_loss
else:
drop = 0.0
if not best_loss or moving_avg_loss < best_loss:
best_loss = moving_avg_loss
if step > iterations:
break
if stop_early and (moving_avg_loss > 4 * best_loss or torch.isnan(loss)):
log_line(log)
log.info("loss diverged - stopping early!")
step = iterations
break
with open(str(learning_rate_tsv), "a") as f:
f.write(f"{step}\t{learning_rate}\t{loss.item()}" f"\t{moving_avg_loss}\t{drop}\n")
self.model.load_state_dict(model_state)
self.model.to(flair.device)
log_line(log)
log.info(f"learning rate finder finished - plot {learning_rate_tsv}")
log_line(log)
return Path(learning_rate_tsv)
| [
"torch.optim.lr_scheduler.OneCycleLR",
"torch.isnan",
"torch.utils.data.dataset.ConcatDataset",
"torch.utils.data.dataset.Subset",
"torch.utils.tensorboard.SummaryWriter"
] | 1.5.0 | adriensas/flair | f01b0e7ff9a87d3862acae50aeaffdc8e8b8ac21 |
1.5 | from typing import Tuple
import numpy as np
import torch
import torch.nn
from torch.nn.functional import softmax
from torch.nn.utils.rnn import pack_padded_sequence
import flair
from flair.data import Dictionary, Label, List
START_TAG: str = "<START>"
STOP_TAG: str = "<STOP>"
class ViterbiLoss(torch.nn.Module):
"""
Calculates the loss for each sequence up to its length t.
"""
def __init__(self, tag_dictionary: Dictionary):
"""
:param tag_dictionary: tag_dictionary of task
"""
super(ViterbiLoss, self).__init__()
self.tag_dictionary = tag_dictionary
self.tagset_size = len(tag_dictionary)
self.start_tag = tag_dictionary.get_idx_for_item(START_TAG)
self.stop_tag = tag_dictionary.get_idx_for_item(STOP_TAG)
def forward(self, features_tuple: tuple, targets: torch.Tensor) -> torch.Tensor:
"""
Forward propagation of Viterbi Loss
:param features_tuple: CRF scores from forward method in shape (batch size, seq len, tagset size, tagset size),
lengths of sentences in batch, transitions from CRF
:param targets: true tags for sentences which will be converted to matrix indices.
:return: average Viterbi Loss over batch size
"""
features, lengths, transitions = features_tuple
batch_size = features.size(0)
seq_len = features.size(1)
targets, targets_matrix_indices = self._format_targets(targets, lengths)
targets_matrix_indices = torch.tensor(targets_matrix_indices, dtype=torch.long).unsqueeze(2).to(flair.device)
# scores_at_targets[range(features.shape[0]), lengths.values -1]
# Squeeze crf scores matrices in 1-dim shape and gather scores at targets by matrix indices
scores_at_targets = torch.gather(features.view(batch_size, seq_len, -1), 2, targets_matrix_indices)
scores_at_targets = pack_padded_sequence(scores_at_targets, lengths, batch_first=True)[0]
transitions_to_stop = transitions[
np.repeat(self.stop_tag, features.shape[0]),
[target[length - 1] for target, length in zip(targets, lengths)],
]
gold_score = scores_at_targets.sum() + transitions_to_stop.sum()
scores_upto_t = torch.zeros(batch_size, self.tagset_size, device=flair.device)
for t in range(max(lengths)):
batch_size_t = sum(
[length > t for length in lengths]
) # since batch is ordered, we can save computation time by reducing our effective batch_size
if t == 0:
# Initially, get scores from <start> tag to all other tags
scores_upto_t[:batch_size_t] = (
scores_upto_t[:batch_size_t] + features[:batch_size_t, t, :, self.start_tag]
)
else:
# We add scores at current timestep to scores accumulated up to previous timestep, and log-sum-exp
# Remember, the cur_tag of the previous timestep is the prev_tag of this timestep
scores_upto_t[:batch_size_t] = self._log_sum_exp(
features[:batch_size_t, t, :, :] + scores_upto_t[:batch_size_t].unsqueeze(1), dim=2
)
all_paths_scores = self._log_sum_exp(scores_upto_t + transitions[self.stop_tag].unsqueeze(0), dim=1).sum()
viterbi_loss = all_paths_scores - gold_score
return viterbi_loss
@staticmethod
def _log_sum_exp(tensor, dim):
"""
Calculates the log-sum-exponent of a tensor's dimension in a numerically stable way.
:param tensor: tensor
:param dim: dimension to calculate log-sum-exp of
:return: log-sum-exp
"""
m, _ = torch.max(tensor, dim)
m_expanded = m.unsqueeze(dim).expand_as(tensor)
return m + torch.log(torch.sum(torch.exp(tensor - m_expanded), dim))
def _format_targets(self, targets: torch.Tensor, lengths: torch.IntTensor):
"""
Formats targets into matrix indices.
CRF scores contain per sentence, per token a (tagset_size x tagset_size) matrix, containing emission score for
        token j + the transition score from previous token i. This means, if we think of our rows as "to tag" and our
        columns as "from tag", the matrix cell [10, 5] contains the emission score for tag 10 plus the transition score
        from previous tag 5, and can be addressed directly through the 1-dim index (10 * tagset_size + 5) = 125,
if our tagset consists of 12 tags.
:param targets: targets as in tag dictionary
:param lengths: lengths of sentences in batch
"""
targets_per_sentence = []
targets_list = targets.tolist()
for cut in lengths:
targets_per_sentence.append(targets_list[:cut])
targets_list = targets_list[cut:]
for t in targets_per_sentence:
t += [self.tag_dictionary.get_idx_for_item(STOP_TAG)] * (int(lengths.max().item()) - len(t))
matrix_indices = list(
map(
lambda s: [self.tag_dictionary.get_idx_for_item(START_TAG) + (s[0] * self.tagset_size)]
+ [s[i] + (s[i + 1] * self.tagset_size) for i in range(0, len(s) - 1)],
targets_per_sentence,
)
)
return targets_per_sentence, matrix_indices
class ViterbiDecoder:
"""
Decodes a given sequence using the Viterbi algorithm.
"""
def __init__(self, tag_dictionary: Dictionary):
"""
:param tag_dictionary: Dictionary of tags for sequence labeling task
"""
self.tag_dictionary = tag_dictionary
self.tagset_size = len(tag_dictionary)
self.start_tag = tag_dictionary.get_idx_for_item(START_TAG)
self.stop_tag = tag_dictionary.get_idx_for_item(STOP_TAG)
def decode(self, features_tuple: tuple, probabilities_for_all_classes: bool) -> Tuple[List, List]:
"""
Decoding function returning the most likely sequence of tags.
:param features_tuple: CRF scores from forward method in shape (batch size, seq len, tagset size, tagset size),
lengths of sentence in batch, transitions of CRF
:param probabilities_for_all_classes: whether to return probabilities for all tags
:return: decoded sequences
"""
features, lengths, transitions = features_tuple
all_tags = []
batch_size = features.size(0)
seq_len = features.size(1)
# Create a tensor to hold accumulated sequence scores at each current tag
scores_upto_t = torch.zeros(batch_size, seq_len + 1, self.tagset_size).to(flair.device)
# Create a tensor to hold back-pointers
# i.e., indices of the previous_tag that corresponds to maximum accumulated score at current tag
# Let pads be the <end> tag index, since that was the last tag in the decoded sequence
backpointers = (
torch.ones((batch_size, seq_len + 1, self.tagset_size), dtype=torch.long, device=flair.device)
* self.stop_tag
)
for t in range(seq_len):
batch_size_t = sum([length > t for length in lengths]) # effective batch size (sans pads) at this timestep
terminates = [i for i, length in enumerate(lengths) if length == t + 1]
if t == 0:
scores_upto_t[:batch_size_t, t] = features[:batch_size_t, t, :, self.start_tag]
backpointers[:batch_size_t, t, :] = (
torch.ones((batch_size_t, self.tagset_size), dtype=torch.long) * self.start_tag
)
else:
# We add scores at current timestep to scores accumulated up to previous timestep, and
# choose the previous timestep that corresponds to the max. accumulated score for each current timestep
scores_upto_t[:batch_size_t, t], backpointers[:batch_size_t, t, :] = torch.max(
features[:batch_size_t, t, :, :] + scores_upto_t[:batch_size_t, t - 1].unsqueeze(1), dim=2
)
# If sentence is over, add transition to STOP-tag
if terminates:
scores_upto_t[terminates, t + 1], backpointers[terminates, t + 1, :] = torch.max(
scores_upto_t[terminates, t].unsqueeze(1) + transitions[self.stop_tag].unsqueeze(0), dim=2
)
# Decode/trace best path backwards
decoded = torch.zeros((batch_size, backpointers.size(1)), dtype=torch.long, device=flair.device)
pointer = torch.ones((batch_size, 1), dtype=torch.long, device=flair.device) * self.stop_tag
for t in list(reversed(range(backpointers.size(1)))):
decoded[:, t] = torch.gather(backpointers[:, t, :], 1, pointer).squeeze(1)
pointer = decoded[:, t].unsqueeze(1)
# Sanity check
assert torch.equal(
decoded[:, 0], torch.ones((batch_size), dtype=torch.long, device=flair.device) * self.start_tag
)
# remove start-tag and backscore to stop-tag
scores_upto_t = scores_upto_t[:, :-1, :]
decoded = decoded[:, 1:]
# Max + Softmax to get confidence score for predicted label and append label to each token
scores = softmax(scores_upto_t, dim=2)
confidences = torch.max(scores, dim=2)
tags = []
for tag_seq, tag_seq_conf, length_seq in zip(decoded, confidences.values, lengths):
tags.append(
[
Label(self.tag_dictionary.get_item_for_index(tag), conf.item())
for tag, conf in list(zip(tag_seq, tag_seq_conf))[:length_seq]
]
)
if probabilities_for_all_classes:
all_tags = self._all_scores_for_token(scores, lengths)
return tags, all_tags
def _all_scores_for_token(self, scores: torch.Tensor, lengths: torch.IntTensor):
"""
Returns all scores for each tag in tag dictionary.
:param scores: Scores for current sentence.
"""
scores = scores.numpy()
prob_tags_per_sentence = []
for scores_sentence, length in zip(scores, lengths):
scores_sentence = scores_sentence[:length]
prob_tags_per_sentence.append(
[
[
Label(self.tag_dictionary.get_item_for_index(score_id), score)
for score_id, score in enumerate(score_dist)
]
for score_dist in scores_sentence
]
)
return prob_tags_per_sentence
| [
"torch.zeros",
"torch.max",
"torch.gather",
"torch.ones",
"torch.tensor",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.functional.softmax",
"torch.exp"
] | 1.5.0 | adriensas/flair | f01b0e7ff9a87d3862acae50aeaffdc8e8b8ac21 |
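A small sketch, not part of the flair file above, of the flattened CRF indexing that ViterbiLoss._format_targets relies on: each token's (tagset_size x tagset_size) score matrix is viewed as one dimension, so cell [to_tag, from_tag] sits at to_tag * tagset_size + from_tag.

import torch

tagset_size = 12
scores = torch.randn(tagset_size, tagset_size)  # rows: "to tag", columns: "from tag"
to_tag, from_tag = 10, 5
flat_index = to_tag * tagset_size + from_tag  # 125 for a 12-tag tagset
assert torch.equal(scores.view(-1)[flat_index], scores[to_tag, from_tag])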
0.4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import json
import os
import shutil
import tempfile
from argparse import Namespace
import numpy as np
import pytest
import torch
from espnet.nets.pytorch_backend.e2e_tts_fastspeech import FeedForwardTransformer
from espnet.nets.pytorch_backend.e2e_tts_transformer import Transformer
from espnet.nets.pytorch_backend.fastspeech.duration_calculator import DurationCalculator
from espnet.nets.pytorch_backend.fastspeech.length_regulator import LengthRegulator
from espnet.nets.pytorch_backend.nets_utils import pad_list
def prepare_inputs(idim, odim, ilens, olens,
device=torch.device('cpu')):
ilens = torch.LongTensor(ilens).to(device)
olens = torch.LongTensor(olens).to(device)
xs = [np.random.randint(0, idim, l) for l in ilens]
ys = [np.random.randn(l, odim) for l in olens]
xs = pad_list([torch.from_numpy(x).long() for x in xs], 0).to(device)
ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)
labels = ys.new_zeros(ys.size(0), ys.size(1))
for i, l in enumerate(olens):
labels[i, l - 1:] = 1
batch = {
"xs": xs,
"ilens": ilens,
"ys": ys,
"labels": labels,
"olens": olens,
}
return batch
def make_transformer_args(**kwargs):
defaults = dict(
embed_dim=0,
eprenet_conv_layers=0,
eprenet_conv_filts=0,
eprenet_conv_chans=0,
dprenet_layers=2,
dprenet_units=256,
adim=32,
aheads=4,
elayers=2,
eunits=128,
dlayers=2,
dunits=128,
postnet_layers=5,
postnet_filts=5,
postnet_chans=512,
eprenet_dropout_rate=0.1,
dprenet_dropout_rate=0.5,
postnet_dropout_rate=0.1,
transformer_enc_dropout_rate=0.1,
transformer_enc_positional_dropout_rate=0.1,
transformer_enc_attn_dropout_rate=0.0,
transformer_dec_dropout_rate=0.1,
transformer_dec_positional_dropout_rate=0.1,
transformer_dec_attn_dropout_rate=0.3,
transformer_enc_dec_attn_dropout_rate=0.0,
use_masking=True,
bce_pos_weight=1.0,
use_batch_norm=True,
use_scaled_pos_enc=True,
encoder_normalize_before=True,
decoder_normalize_before=True,
encoder_concat_after=False,
decoder_concat_after=False,
transformer_init="pytorch",
initial_encoder_alpha=1.0,
initial_decoder_alpha=1.0,
reduction_factor=1,
loss_type="L1",
use_guided_attn_loss=False,
num_heads_applied_guided_attn=2,
num_layers_applied_guided_attn=2,
guided_attn_loss_sigma=0.4,
modules_applied_guided_attn=["encoder", "decoder", "encoder-decoder"]
)
defaults.update(kwargs)
return defaults
def make_feedforward_transformer_args(**kwargs):
defaults = dict(
adim=32,
aheads=4,
elayers=2,
eunits=128,
dlayers=2,
dunits=128,
duration_predictor_layers=2,
duration_predictor_chans=128,
duration_predictor_kernel_size=3,
duration_predictor_dropout_rate=0.1,
positionwise_layer_type="linear",
positionwise_conv_kernel_size=1,
transformer_enc_dropout_rate=0.1,
transformer_enc_positional_dropout_rate=0.1,
transformer_enc_attn_dropout_rate=0.0,
transformer_dec_dropout_rate=0.1,
transformer_dec_positional_dropout_rate=0.1,
transformer_dec_attn_dropout_rate=0.3,
transformer_enc_dec_attn_dropout_rate=0.0,
use_masking=True,
use_scaled_pos_enc=True,
encoder_normalize_before=True,
decoder_normalize_before=True,
encoder_concat_after=False,
decoder_concat_after=False,
transformer_init="pytorch",
initial_encoder_alpha=1.0,
initial_decoder_alpha=1.0,
transfer_encoder_from_teacher=False,
transferred_encoder_module="all",
reduction_factor=1,
teacher_model=None,
)
defaults.update(kwargs)
return defaults
@pytest.mark.parametrize(
"model_dict", [
({}),
({"use_masking": False}),
({"use_scaled_pos_enc": False}),
({"positionwise_layer_type": "conv1d", "positionwise_conv_kernel_size": 3}),
({"encoder_normalize_before": False}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"encoder_concat_after": True}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
])
def test_fastspeech_trainable_and_decodable(model_dict):
# make args
idim, odim = 10, 25
teacher_model_args = make_transformer_args(**model_dict)
model_args = make_feedforward_transformer_args(**model_dict)
# setup batch
ilens = [10, 5]
olens = [20, 15]
batch = prepare_inputs(idim, odim, ilens, olens)
# define teacher model and save it
teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))
tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
torch.save(teacher_model.state_dict(), tmpdir + "/model.dummy.best")
with open(tmpdir + "/model.json", 'wb') as f:
f.write(json.dumps((idim, odim, teacher_model_args),
indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
# define model
model_args["teacher_model"] = tmpdir + "/model.dummy.best"
model = FeedForwardTransformer(idim, odim, Namespace(**model_args))
optimizer = torch.optim.Adam(model.parameters())
# trainable
loss = model(**batch).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# decodable
model.eval()
with torch.no_grad():
model.inference(batch["xs"][0][:batch["ilens"][0]])
model.calculate_all_attentions(**batch)
# remove tmpdir
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="gpu required")
@pytest.mark.parametrize(
"model_dict", [
({}),
({"use_masking": False}),
({"use_scaled_pos_enc": False}),
({"encoder_normalize_before": False}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"encoder_concat_after": True}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
])
def test_fastspeech_gpu_trainable(model_dict):
# make args
idim, odim = 10, 25
teacher_model_args = make_transformer_args(**model_dict)
model_args = make_feedforward_transformer_args(**model_dict)
# setup batch
ilens = [10, 5]
olens = [20, 15]
device = torch.device('cuda')
batch = prepare_inputs(idim, odim, ilens, olens, device=device)
# define teacher model and save it
teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))
tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
torch.save(teacher_model.state_dict(), tmpdir + "/model.dummy.best")
with open(tmpdir + "/model.json", 'wb') as f:
f.write(json.dumps((idim, odim, teacher_model_args),
indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
# define model
model_args["teacher_model"] = tmpdir + "/model.dummy.best"
model = FeedForwardTransformer(idim, odim, Namespace(**model_args))
model.to(device)
optimizer = torch.optim.Adam(model.parameters())
# trainable
loss = model(**batch).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# remove tmpdir
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="multi gpu required")
@pytest.mark.parametrize(
"model_dict", [
({}),
({"use_masking": False}),
({"use_scaled_pos_enc": False}),
({"encoder_normalize_before": False}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"encoder_concat_after": True}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
])
def test_fastspeech_multi_gpu_trainable(model_dict):
# make args
idim, odim = 10, 25
teacher_model_args = make_transformer_args(**model_dict)
model_args = make_feedforward_transformer_args(**model_dict)
# setup batch
ilens = [10, 5]
olens = [20, 15]
device = torch.device('cuda')
batch = prepare_inputs(idim, odim, ilens, olens, device=device)
# define teacher model and save it
teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))
tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
torch.save(teacher_model.state_dict(), tmpdir + "/model.dummy.best")
with open(tmpdir + "/model.json", 'wb') as f:
f.write(json.dumps((idim, odim, teacher_model_args),
indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
# define model
ngpu = 2
device_ids = list(range(ngpu))
model_args["teacher_model"] = tmpdir + "/model.dummy.best"
model = FeedForwardTransformer(idim, odim, Namespace(**model_args))
model = torch.nn.DataParallel(model, device_ids)
model.to(device)
optimizer = torch.optim.Adam(model.parameters())
# trainable
loss = model(**batch).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# remove tmpdir
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
@pytest.mark.parametrize(
"model_dict", [
({}),
({"use_scaled_pos_enc": False}),
({"init_encoder_module": "embed"}),
({"encoder_normalize_before": False}),
({"decoder_normalize_before": False}),
({"encoder_normalize_before": False, "decoder_normalize_before": False}),
({"encoder_concat_after": True}),
({"decoder_concat_after": True}),
({"encoder_concat_after": True, "decoder_concat_after": True}),
])
def test_initialization(model_dict):
# make args
idim, odim = 10, 25
teacher_model_args = make_transformer_args(**model_dict)
model_args = make_feedforward_transformer_args(**model_dict)
# define teacher model and save it
teacher_model = Transformer(idim, odim, Namespace(**teacher_model_args))
tmpdir = tempfile.mkdtemp(prefix="tmp_", dir="/tmp")
torch.save(teacher_model.state_dict(), tmpdir + "/model.dummy.best")
with open(tmpdir + "/model.json", 'wb') as f:
f.write(json.dumps((idim, odim, teacher_model_args),
indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
# define model
model_args["teacher_model"] = tmpdir + "/model.dummy.best"
model_args["transfer_encoder_from_teacher"] = True
model = FeedForwardTransformer(idim, odim, Namespace(**model_args))
# check initialization
if model_args["transferred_encoder_module"] == "all":
for p1, p2 in zip(model.encoder.parameters(), model.teacher.encoder.parameters()):
np.testing.assert_array_equal(p1.data.cpu().numpy(), p2.data.cpu().numpy())
else:
np.testing.assert_array_equal(
model.encoder.embed[0].weight.data.cpu().numpy(),
model.teacher.encoder.embed[0].weight.data.cpu().numpy()
)
# remove tmpdir
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
def test_length_regulator():
# prepare inputs
idim = 5
ilens = [10, 5, 3]
xs = pad_list([torch.randn((ilen, idim)) for ilen in ilens], 0.0)
ds = pad_list([torch.arange(ilen) for ilen in ilens], 0)
# test with non-zero durations
length_regulator = LengthRegulator()
xs_expand = length_regulator(xs, ds, ilens)
assert int(xs_expand.shape[1]) == int(ds.sum(dim=-1).max())
# test with duration including zero
ds[:, 2] = 0
xs_expand = length_regulator(xs, ds, ilens)
assert int(xs_expand.shape[1]) == int(ds.sum(dim=-1).max())
def test_duration_calculator():
# define duration calculator
idim, odim = 10, 25
teacher_model_args = make_transformer_args()
teacher = Transformer(idim, odim, Namespace(**teacher_model_args))
duration_calculator = DurationCalculator(teacher)
# setup batch
ilens = [10, 5, 3]
olens = [20, 15, 10]
batch = prepare_inputs(idim, odim, ilens, olens)
# calculate durations
ds = duration_calculator(batch["xs"], batch["ilens"], batch["ys"], batch["olens"])
np.testing.assert_array_equal(ds.sum(dim=-1).cpu().numpy(), batch["olens"].cpu().numpy())
| [
"torch.device",
"torch.arange",
"torch.no_grad",
"torch.cuda.device_count",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.randn",
"torch.nn.DataParallel"
] | 0.4.1 | szmmm/speechchain | 909724c6f305588a52958f64f584ad21696b5173 |
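A minimal sketch, not from the espnet test file above, of the padding step that prepare_inputs relies on, shown here with torch.nn.utils.rnn.pad_sequence rather than espnet's pad_list.

import torch
from torch.nn.utils.rnn import pad_sequence

xs = [torch.randint(0, 10, (n,)) for n in (10, 5)]
padded = pad_sequence(xs, batch_first=True, padding_value=0)
print(padded.shape)  # torch.Size([2, 10]); the shorter sequence is zero-padded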
1.1 | import torch
class AnchorGenerator(object):
"""
Examples:
>>> from mmdet.core import AnchorGenerator
>>> self = AnchorGenerator(9, [1.], [1.])
>>> all_anchors = self.grid_anchors((2, 2), device='cpu')
>>> print(all_anchors)
tensor([[ 0., 0., 8., 8.],
[16., 0., 24., 8.],
[ 0., 16., 8., 24.],
[16., 16., 24., 24.]])
"""
def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None):
self.base_size = base_size
self.scales = torch.Tensor(scales)
self.ratios = torch.Tensor(ratios)
self.scale_major = scale_major
self.ctr = ctr
self.base_anchors = self.gen_base_anchors()
@property
def num_base_anchors(self):
return self.base_anchors.size(0)
def gen_base_anchors(self):
w = self.base_size
h = self.base_size
if self.ctr is None:
x_ctr = 0.5 * (w - 1)
y_ctr = 0.5 * (h - 1)
else:
x_ctr, y_ctr = self.ctr
h_ratios = torch.sqrt(self.ratios)
w_ratios = 1 / h_ratios
if self.scale_major:
ws = (w * w_ratios[:, None] * self.scales[None, :]).view(-1)
hs = (h * h_ratios[:, None] * self.scales[None, :]).view(-1)
else:
ws = (w * self.scales[:, None] * w_ratios[None, :]).view(-1)
hs = (h * self.scales[:, None] * h_ratios[None, :]).view(-1)
# yapf: disable
base_anchors = torch.stack(
[
x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
],
dim=-1).round()
# yapf: enable
return base_anchors
def _meshgrid(self, x, y, row_major=True):
xx = x.repeat(len(y))
yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
if row_major:
return xx, yy
else:
return yy, xx
def grid_anchors(self, featmap_size, stride=16, device='cuda'):
base_anchors = self.base_anchors.to(device)
feat_h, feat_w = featmap_size
shift_x = torch.arange(0, feat_w, device=device) * stride
shift_y = torch.arange(0, feat_h, device=device) * stride
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
shifts = shifts.type_as(base_anchors)
# first feat_w elements correspond to the first row of shifts
# add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
# shifted anchors (K, A, 4), reshape to (K*A, 4)
all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
all_anchors = all_anchors.view(-1, 4)
# first A rows correspond to A anchors of (0, 0) in feature map,
# then (0, 1), (0, 2), ...
return all_anchors
def valid_flags(self, featmap_size, valid_size, device='cuda'):
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
valid = valid[:, None].expand(
valid.size(0), self.num_base_anchors).contiguous().view(-1)
return valid
| [
"torch.zeros",
"torch.sqrt",
"torch.stack",
"torch.arange",
"torch.Tensor"
] | 1.1.0 | witnessai/GRAN | 952c2b08a58f3b0087f0f18fd48f8e385e45908b |
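An illustrative sketch, not from the mmdet file above, of how A base anchors and K grid shifts broadcast to K*A anchors; it reproduces the docstring example (one 8x8 base anchor, a 2x2 feature map, stride 16).

import torch

base_anchors = torch.tensor([[0., 0., 8., 8.]])            # (A, 4), A = 1
shift = torch.arange(2) * 16                               # grid positions 0, 16
xx = shift.repeat(2)                                       # x varies fastest: 0, 16, 0, 16
yy = shift.view(-1, 1).repeat(1, 2).view(-1)               # 0, 0, 16, 16
shifts = torch.stack([xx, yy, xx, yy], dim=-1).float()     # (K, 4), K = 4
all_anchors = (base_anchors[None, :, :] + shifts[:, None, :]).reshape(-1, 4)
print(all_anchors)  # [[0,0,8,8], [16,0,24,8], [0,16,8,24], [16,16,24,24]]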
1.1 | from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,
merge_aug_masks, merge_aug_proposals, multiclass_nms)
class RPNTestMixin(object):
def simple_test_rpn(self, x, img_meta, rpn_test_cfg):
rpn_outs = self.rpn_head(x)
proposal_inputs = rpn_outs + (img_meta, rpn_test_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
return proposal_list
def aug_test_rpn(self, feats, img_metas, rpn_test_cfg):
imgs_per_gpu = len(img_metas[0])
aug_proposals = [[] for _ in range(imgs_per_gpu)]
for x, img_meta in zip(feats, img_metas):
proposal_list = self.simple_test_rpn(x, img_meta, rpn_test_cfg)
for i, proposals in enumerate(proposal_list):
aug_proposals[i].append(proposals)
# reorganize the order of 'img_metas' to match the dimensions
# of 'aug_proposals'
aug_img_metas = []
for i in range(imgs_per_gpu):
aug_img_meta = []
for j in range(len(img_metas)):
aug_img_meta.append(img_metas[j][i])
aug_img_metas.append(aug_img_meta)
# after merging, proposals will be rescaled to the original image size
merged_proposals = [
merge_aug_proposals(proposals, aug_img_meta, rpn_test_cfg)
for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
]
return merged_proposals
class BBoxTestMixin(object):
def simple_test_bboxes(self,
x,
img_meta,
proposals,
rcnn_test_cfg,
rescale=False):
"""Test only det bboxes without augmentation."""
rois = bbox2roi(proposals)
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
# if img_meta[0]['filename'] == 'data/coco/val2014/COCO_val2014_000000242605.jpg':
# pass
# import ipdb
# ipdb.set_trace()
if 'fusedfeat2sem' in str([x for x in self.bbox_head.modules()]):
import torchvision
import torch
import ipdb
# ipdb.set_trace()
full_img_bboxes = torch.Tensor([[0, 0, 0, x[3].shape[3], x[3].shape[2]]]).cuda()
full_img_feat = torchvision.ops.roi_align(x[3], full_img_bboxes, (16, 16))
cls_score, bbox_pred = self.bbox_head(roi_feats, full_img_feat)
else:
cls_score, bbox_pred = self.bbox_head(roi_feats, rois)
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
det_bboxes, det_labels = self.bbox_head.get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
return det_bboxes, det_labels
def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
# TODO more flexible
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip)
rois = bbox2roi([proposals])
# recompute feature maps to save GPU memory
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
if 'fusedfeat2sem' in str([x for x in self.bbox_head.modules()]):
import torchvision
import torch
import ipdb
# ipdb.set_trace()
full_img_bboxes = torch.Tensor([[0, 0, 0, x[3].shape[3], x[3].shape[2]]]).cuda()
full_img_feat = torchvision.ops.roi_align(x[3], full_img_bboxes, (16, 16))
cls_score, bbox_pred = self.bbox_head(roi_feats, full_img_feat)
elif self.bbox_head.has_matcher:
cls_score, bbox_pred = self.bbox_head(roi_feats, rois)
else:
cls_score, bbox_pred = self.bbox_head(roi_feats)
bboxes, scores = self.bbox_head.get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
return det_bboxes, det_labels
class MaskTestMixin(object):
def simple_test_mask(self,
x,
img_meta,
det_bboxes,
det_labels,
rescale=False):
# image shape of the first image in the batch (only one)
ori_shape = img_meta[0]['ori_shape']
scale_factor = img_meta[0]['scale_factor']
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
_bboxes = (
det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head(mask_feats)
segm_result = self.mask_head.get_seg_masks(mask_pred, _bboxes,
det_labels,
self.test_cfg.rcnn,
ori_shape, scale_factor,
rescale)
return segm_result
def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
else:
aug_masks = []
for x, img_meta in zip(feats, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)],
mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head(mask_feats)
# convert to numpy array to save memory
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, img_metas,
self.test_cfg.rcnn)
ori_shape = img_metas[0][0]['ori_shape']
segm_result = self.mask_head.get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
self.test_cfg.rcnn,
ori_shape,
scale_factor=1.0,
rescale=False)
return segm_result
| [
"torch.Tensor"
] | 1.1.0 | witnessai/GRAN | 952c2b08a58f3b0087f0f18fd48f8e385e45908b |
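An illustrative sketch, not from the mmdet file above, of the RoI layout used in the full-image roi_align calls: each row is [batch_index, x1, y1, x2, y2] in feature-map coordinates (spatial_scale defaults to 1.0). The tensor sizes below are made up for the example.

import torch
import torchvision

feat = torch.randn(1, 256, 32, 32)
full_img_roi = torch.tensor([[0., 0., 0., 32., 32.]])  # whole feature map of image 0
pooled = torchvision.ops.roi_align(feat, full_img_roi, output_size=(16, 16))
print(pooled.shape)  # torch.Size([1, 256, 16, 16])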
1.1 | import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
from ..utils import ConvModule
from mmdet.core import (auto_fp16)
from ..registry import HEADS
@HEADS.register_module
class GlobalContextSemanticHead(nn.Module):
"""Simplest RoI head, with only two fc layers for semantic and
regression respectively"""
def __init__(self,
roi_feat_size=7,
in_channels=256,
num_convs=3,
conv_out_channels=256,
num_fcs=1,
fc_out_channels=1024,
semantic_dims=1024,
num_classes=49,
conv_cfg=None,
norm_cfg=None
):
super(GlobalContextSemanticHead, self).__init__()
self.roi_feat_size = _pair(roi_feat_size)
self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
self.in_channels = in_channels
self.fp16_enabled = False
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
in_channels = self.in_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.out_dim = semantic_dims
self.relu = nn.ReLU(inplace=True)
self.convs, self.fcs, self.last_dim = self._add_conv_fc_branch(num_convs, num_fcs, in_channels)
# self.fc2 = nn.Linear(self.last_dim, self.out_dim)
self.final_fc = nn.Linear(self.last_dim, num_classes)
self.debug_imgs = None
def _add_conv_fc_branch(self,
num_convs,
num_fcs,
in_channels):
last_layer_dim = in_channels
context_convs = nn.ModuleList()
if num_convs > 0:
for i in range(num_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
context_convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
context_fcs = nn.ModuleList()
if num_fcs > 0:
last_layer_dim *= self.roi_feat_area
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
context_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return context_convs, context_fcs, last_layer_dim
def init_weights(self):
nn.init.normal_(self.final_fc.weight, 0, 0.001)
nn.init.constant_(self.final_fc.bias, 0)
# nn.init.normal_(self.fc2.weight, 0, 0.001)
# nn.init.constant_(self.fc2.bias, 0)
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if x.dim() > 2:
x = x.view(x.size(0), -1)
for fc in self.fcs:
x = self.relu(fc(x))
# x = self.relu(self.fc2(x))
x = self.final_fc(x) # 1024*49
return x | [
"torch.nn.Linear",
"torch.nn.modules.utils._pair",
"torch.nn.ModuleList",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.nn.init.normal_"
] | 1.1.0 | witnessai/GRAN | 952c2b08a58f3b0087f0f18fd48f8e385e45908b |
1.1 | from __future__ import division
import math
import numpy as np
import torch
from mmcv.runner.utils import get_dist_info
from torch.utils.data import DistributedSampler as _DistributedSampler
from torch.utils.data import Sampler
class DistributedSampler(_DistributedSampler):
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
class GroupSampler(Sampler):
def __init__(self, dataset, samples_per_gpu=1):
assert hasattr(dataset, 'flag')
self.dataset = dataset
self.samples_per_gpu = samples_per_gpu
self.flag = dataset.flag.astype(np.int64)
self.group_sizes = np.bincount(self.flag)
self.num_samples = 0
for i, size in enumerate(self.group_sizes):
self.num_samples += int(np.ceil(
size / self.samples_per_gpu)) * self.samples_per_gpu
def __iter__(self):
indices = []
for i, size in enumerate(self.group_sizes):
if size == 0:
continue
indice = np.where(self.flag == i)[0]
assert len(indice) == size
np.random.shuffle(indice)
num_extra = int(np.ceil(size / self.samples_per_gpu)
) * self.samples_per_gpu - len(indice)
indice = np.concatenate(
[indice, np.random.choice(indice, num_extra)])
indices.append(indice)
indices = np.concatenate(indices)
indices = [
indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
for i in np.random.permutation(
range(len(indices) // self.samples_per_gpu))
]
indices = np.concatenate(indices)
indices = indices.astype(np.int64).tolist()
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
class DistributedGroupSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self,
dataset,
samples_per_gpu=1,
num_replicas=None,
rank=None):
_rank, _num_replicas = get_dist_info()
if num_replicas is None:
num_replicas = _num_replicas
if rank is None:
rank = _rank
self.dataset = dataset
self.samples_per_gpu = samples_per_gpu
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
assert hasattr(self.dataset, 'flag')
self.flag = self.dataset.flag
self.group_sizes = np.bincount(self.flag)
self.num_samples = 0
for i, j in enumerate(self.group_sizes):
self.num_samples += int(
math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
self.num_replicas)) * self.samples_per_gpu
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = []
for i, size in enumerate(self.group_sizes):
if size > 0:
indice = np.where(self.flag == i)[0]
assert len(indice) == size
indice = indice[list(torch.randperm(int(size),
generator=g))].tolist()
extra = int(
math.ceil(
size * 1.0 / self.samples_per_gpu / self.num_replicas)
) * self.samples_per_gpu * self.num_replicas - len(indice)
# pad indice
tmp = indice.copy()
for _ in range(extra // size):
indice.extend(tmp)
indice.extend(tmp[:extra % size])
indices.extend(indice)
assert len(indices) == self.total_size
indices = [
indices[j] for i in list(
torch.randperm(
len(indices) // self.samples_per_gpu, generator=g))
for j in range(i * self.samples_per_gpu, (i + 1) *
self.samples_per_gpu)
]
# subsample
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
| [
"torch.Generator"
] | 1.1.0 | witnessai/GRAN | 952c2b08a58f3b0087f0f18fd48f8e385e45908b |
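A minimal sketch, not from the mmdet file above, of the padding rule DistributedGroupSampler applies: each group of indices is extended until its length is a multiple of samples_per_gpu * num_replicas, so every replica draws full, equally sized batches from a single group. The helper name pad_group is hypothetical.

import math

def pad_group(indices, samples_per_gpu, num_replicas):
    multiple = samples_per_gpu * num_replicas
    target = int(math.ceil(len(indices) / multiple)) * multiple
    padded = list(indices)
    while len(padded) < target:
        padded.extend(indices[:target - len(padded)])
    return padded

print(pad_group(list(range(7)), samples_per_gpu=2, num_replicas=2))  # 8 indices, last one repeated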
1.0 | import warnings
warnings.filterwarnings("ignore")
import sys
import matplotlib.pylab as plt
import scipy.io.wavfile
sys.path.append('waveglow/')
import numpy as np
import torch
from hparams import create_hparams
from train import load_model
from text import text_to_sequence
def plot_data(data, figsize=(16, 4)):
fig, axes = plt.subplots(1, len(data), figsize=figsize)
for i in range(len(data)):
        axes[i].imshow(data[i], aspect='auto', origin='lower',
interpolation='none')
hparams = create_hparams()
hparams.sampling_rate = 22050
# hparams.gate_threshold = 0.1
checkpoint_path = "tacotron2_statedict.pt"
# checkpoint_path = "outdir/checkpoint_12500"
# checkpoint_path = "outdir/saved_10000"
#checkpoint_path = "outdir_self_data/saved_170000"
model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'])
_ = model.cuda().eval().half()
waveglow_path = 'waveglow_256channels.pt'
# waveglow_path = 'waveglow/checkpoints1/saved_356000'
waveglow = torch.load(waveglow_path)['model']
waveglow.cuda().eval().half()
for k in waveglow.convinv:
k.float()
text = 'Littlelights is awesome!'
sequence = np.array(text_to_sequence(text, ['english_cleaners']))[None, :]
sequence = torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long()
# text_list = [
# "Read loudly, and be a super hero!",
# "Join me to learn some words.",
# ]
# sequence_list = [np.array(text_to_sequence(text, ['english_cleaners']))[None, :] for text in text_list]
# sequence_list = torch.autograd.Variable(torch.from_numpy(sequence_list)).cuda().long()
mel_outputs, mel_outputs_postnet, _, alignments = model.inference(sequence)
with torch.no_grad():
audio = waveglow.infer(mel_outputs_postnet, sigma=0.666)
data = audio[0].data.cpu().numpy().astype(np.float32)
scipy.io.wavfile.write('audio_output/{}.wav'.format(text), hparams.sampling_rate, data)
| [
"torch.no_grad",
"torch.load",
"torch.from_numpy"
] | 1.0.0 | gaoxiao/tacotron2 | 0a58682c8025f892b29898088ae275b9086887b6 |
1.9 | import os
from copy import deepcopy
import pytest
import torch
from torch.optim import SGD, Adadelta, Adagrad, Adam, RMSprop
from pythae.customexception import BadInheritanceError
from pythae.models.base.base_utils import ModelOutput
from pythae.models import VQVAE, VQVAEConfig
from pythae.trainers import BaseTrainer, BaseTrainerConfig
from pythae.pipelines import TrainingPipeline
from tests.data.custom_architectures import (
Decoder_AE_Conv,
Encoder_AE_Conv,
NetBadInheritance,
)
PATH = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(params=[VQVAEConfig(), VQVAEConfig(latent_dim=4)])
def model_configs_no_input_dim(request):
return request.param
@pytest.fixture(
params=[
VQVAEConfig(
input_dim=(1, 28, 28), latent_dim=4, num_embeddings=10
), # ! Needs squared latent_dim !
VQVAEConfig(
input_dim=(1, 28, 28),
beta=0.02,
latent_dim=4,
),
]
)
def model_configs(request):
return request.param
@pytest.fixture
def custom_encoder(model_configs):
return Encoder_AE_Conv(model_configs)
@pytest.fixture
def custom_decoder(model_configs):
return Decoder_AE_Conv(model_configs)
class Test_Model_Building:
@pytest.fixture()
def bad_net(self):
return NetBadInheritance()
def test_build_model(self, model_configs):
model = VQVAE(model_configs)
assert all(
[
model.input_dim == model_configs.input_dim,
model.latent_dim == model_configs.latent_dim,
]
)
def test_raises_bad_inheritance(self, model_configs, bad_net):
with pytest.raises(BadInheritanceError):
model = VQVAE(model_configs, encoder=bad_net)
with pytest.raises(BadInheritanceError):
model = VQVAE(model_configs, decoder=bad_net)
def test_raises_no_input_dim(
self, model_configs_no_input_dim, custom_encoder, custom_decoder
):
with pytest.raises(AttributeError):
model = VQVAE(model_configs_no_input_dim)
with pytest.raises(AttributeError):
model = VQVAE(model_configs_no_input_dim, encoder=custom_encoder)
with pytest.raises(AttributeError):
model = VQVAE(model_configs_no_input_dim, decoder=custom_decoder)
def test_build_custom_arch(self, model_configs, custom_encoder, custom_decoder):
model = VQVAE(model_configs, encoder=custom_encoder, decoder=custom_decoder)
assert model.encoder == custom_encoder
assert not model.model_config.uses_default_encoder
assert model.decoder == custom_decoder
assert not model.model_config.uses_default_decoder
model = VQVAE(model_configs, encoder=custom_encoder)
assert model.encoder == custom_encoder
assert not model.model_config.uses_default_encoder
assert model.model_config.uses_default_decoder
model = VQVAE(model_configs, decoder=custom_decoder)
assert model.model_config.uses_default_encoder
assert model.decoder == custom_decoder
assert not model.model_config.uses_default_decoder
class Test_Model_Saving:
def test_default_model_saving(self, tmpdir, model_configs):
tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
model = VQVAE(model_configs)
model.state_dict()["encoder.layers.0.0.weight"][0] = 0
model.save(dir_path=dir_path)
assert set(os.listdir(dir_path)) == set(["model_config.json", "model.pt"])
# reload model
model_rec = VQVAE.load_from_folder(dir_path)
# check configs are the same
assert model_rec.model_config.__dict__ == model.model_config.__dict__
assert all(
[
torch.equal(model_rec.state_dict()[key], model.state_dict()[key])
for key in model.state_dict().keys()
]
)
def test_custom_encoder_model_saving(self, tmpdir, model_configs, custom_encoder):
tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
model = VQVAE(model_configs, encoder=custom_encoder)
model.state_dict()["encoder.layers.0.0.weight"][0] = 0
model.save(dir_path=dir_path)
assert set(os.listdir(dir_path)) == set(
["model_config.json", "model.pt", "encoder.pkl"]
)
# reload model
model_rec = VQVAE.load_from_folder(dir_path)
# check configs are the same
assert model_rec.model_config.__dict__ == model.model_config.__dict__
assert all(
[
torch.equal(model_rec.state_dict()[key], model.state_dict()[key])
for key in model.state_dict().keys()
]
)
def test_custom_decoder_model_saving(self, tmpdir, model_configs, custom_decoder):
tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
model = VQVAE(model_configs, decoder=custom_decoder)
model.state_dict()["encoder.layers.0.0.weight"][0] = 0
model.save(dir_path=dir_path)
assert set(os.listdir(dir_path)) == set(
["model_config.json", "model.pt", "decoder.pkl"]
)
# reload model
model_rec = VQVAE.load_from_folder(dir_path)
# check configs are the same
assert model_rec.model_config.__dict__ == model.model_config.__dict__
assert all(
[
torch.equal(model_rec.state_dict()[key], model.state_dict()[key])
for key in model.state_dict().keys()
]
)
def test_full_custom_model_saving(
self, tmpdir, model_configs, custom_encoder, custom_decoder
):
tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
model = VQVAE(model_configs, encoder=custom_encoder, decoder=custom_decoder)
model.state_dict()["encoder.layers.0.0.weight"][0] = 0
model.save(dir_path=dir_path)
assert set(os.listdir(dir_path)) == set(
["model_config.json", "model.pt", "encoder.pkl", "decoder.pkl"]
)
# reload model
model_rec = VQVAE.load_from_folder(dir_path)
# check configs are the same
assert model_rec.model_config.__dict__ == model.model_config.__dict__
assert all(
[
torch.equal(model_rec.state_dict()[key], model.state_dict()[key])
for key in model.state_dict().keys()
]
)
def test_raises_missing_files(
self, tmpdir, model_configs, custom_encoder, custom_decoder
):
tmpdir.mkdir("dummy_folder")
        dir_path = os.path.join(tmpdir, "dummy_folder")
model = VQVAE(model_configs, encoder=custom_encoder, decoder=custom_decoder)
model.state_dict()["encoder.layers.0.0.weight"][0] = 0
model.save(dir_path=dir_path)
os.remove(os.path.join(dir_path, "decoder.pkl"))
# check raises decoder.pkl is missing
with pytest.raises(FileNotFoundError):
model_rec = VQVAE.load_from_folder(dir_path)
os.remove(os.path.join(dir_path, "encoder.pkl"))
# check raises encoder.pkl is missing
with pytest.raises(FileNotFoundError):
model_rec = VQVAE.load_from_folder(dir_path)
os.remove(os.path.join(dir_path, "model.pt"))
# check raises encoder.pkl is missing
with pytest.raises(FileNotFoundError):
model_rec = VQVAE.load_from_folder(dir_path)
os.remove(os.path.join(dir_path, "model_config.json"))
# check raises encoder.pkl is missing
with pytest.raises(FileNotFoundError):
model_rec = VQVAE.load_from_folder(dir_path)
class Test_Model_forward:
@pytest.fixture
def demo_data(self):
data = torch.load(os.path.join(PATH, "data/mnist_clean_train_dataset_sample"))[
:
]
return data # This is an extract of 3 data from MNIST (unnormalized) used to test custom architecture
@pytest.fixture
def vae(self, model_configs, demo_data):
model_configs.input_dim = tuple(demo_data["data"][0].shape)
return VQVAE(model_configs)
def test_model_train_output(self, vae, demo_data):
vae.train()
out = vae(demo_data)
assert isinstance(out, ModelOutput)
assert set(["loss", "recon_loss", "vq_loss", "recon_x", "z"]) == set(out.keys())
assert out.z.shape[0] == demo_data["data"].shape[0]
assert out.recon_x.shape == demo_data["data"].shape
@pytest.mark.slow
class Test_VQVAETraining:
@pytest.fixture
def train_dataset(self):
return torch.load(os.path.join(PATH, "data/mnist_clean_train_dataset_sample"))
@pytest.fixture(
params=[BaseTrainerConfig(num_epochs=3, steps_saving=2, learning_rate=1e-5)]
)
def training_configs(self, tmpdir, request):
tmpdir.mkdir("dummy_folder")
dir_path = os.path.join(tmpdir, "dummy_folder")
request.param.output_dir = dir_path
return request.param
@pytest.fixture(
params=[
torch.rand(1),
torch.rand(1),
torch.rand(1),
torch.rand(1),
torch.rand(1),
]
)
def vae(self, model_configs, custom_encoder, custom_decoder, request):
# randomized
alpha = request.param
if alpha < 0.25:
model = VQVAE(model_configs)
elif 0.25 <= alpha < 0.5:
model = VQVAE(model_configs, encoder=custom_encoder)
elif 0.5 <= alpha < 0.75:
model = VQVAE(model_configs, decoder=custom_decoder)
else:
model = VQVAE(model_configs, encoder=custom_encoder, decoder=custom_decoder)
return model
@pytest.fixture(params=[None, Adagrad, Adam, Adadelta, SGD, RMSprop])
def optimizers(self, request, vae, training_configs):
if request.param is not None:
optimizer = request.param(
vae.parameters(), lr=training_configs.learning_rate
)
else:
optimizer = None
return optimizer
def test_vae_train_step(self, vae, train_dataset, training_configs, optimizers):
trainer = BaseTrainer(
model=vae,
train_dataset=train_dataset,
training_config=training_configs,
optimizer=optimizers,
)
start_model_state_dict = deepcopy(trainer.model.state_dict())
step_1_loss = trainer.train_step(epoch=1)
step_1_model_state_dict = deepcopy(trainer.model.state_dict())
# check that weights were updated
assert not all(
[
torch.equal(start_model_state_dict[key], step_1_model_state_dict[key])
for key in start_model_state_dict.keys()
]
)
def test_vae_eval_step(self, vae, train_dataset, training_configs, optimizers):
trainer = BaseTrainer(
model=vae,
train_dataset=train_dataset,
eval_dataset=train_dataset,
training_config=training_configs,
optimizer=optimizers,
)
start_model_state_dict = deepcopy(trainer.model.state_dict())
step_1_loss = trainer.eval_step(epoch=1)
step_1_model_state_dict = deepcopy(trainer.model.state_dict())
# check that weights were updated
assert all(
[
torch.equal(start_model_state_dict[key], step_1_model_state_dict[key])
for key in start_model_state_dict.keys()
]
)
def test_vae_main_train_loop(
self, tmpdir, vae, train_dataset, training_configs, optimizers
):
trainer = BaseTrainer(
model=vae,
train_dataset=train_dataset,
eval_dataset=train_dataset,
training_config=training_configs,
optimizer=optimizers,
)
start_model_state_dict = deepcopy(trainer.model.state_dict())
trainer.train()
step_1_model_state_dict = deepcopy(trainer.model.state_dict())
# check that weights were updated
assert not all(
[
torch.equal(start_model_state_dict[key], step_1_model_state_dict[key])
for key in start_model_state_dict.keys()
]
)
def test_checkpoint_saving(
self, tmpdir, vae, train_dataset, training_configs, optimizers
):
dir_path = training_configs.output_dir
trainer = BaseTrainer(
model=vae,
train_dataset=train_dataset,
training_config=training_configs,
optimizer=optimizers,
)
# Make a training step
step_1_loss = trainer.train_step(epoch=1)
model = deepcopy(trainer.model)
optimizer = deepcopy(trainer.optimizer)
trainer.save_checkpoint(dir_path=dir_path, epoch=0, model=model)
checkpoint_dir = os.path.join(dir_path, "checkpoint_epoch_0")
assert os.path.isdir(checkpoint_dir)
files_list = os.listdir(checkpoint_dir)
assert set(["model.pt", "optimizer.pt", "training_config.json"]).issubset(
set(files_list)
)
# check pickled custom decoder
if not vae.model_config.uses_default_decoder:
assert "decoder.pkl" in files_list
else:
assert not "decoder.pkl" in files_list
# check pickled custom encoder
if not vae.model_config.uses_default_encoder:
assert "encoder.pkl" in files_list
else:
assert not "encoder.pkl" in files_list
model_rec_state_dict = torch.load(os.path.join(checkpoint_dir, "model.pt"))[
"model_state_dict"
]
assert all(
[
torch.equal(
model_rec_state_dict[key].cpu(), model.state_dict()[key].cpu()
)
for key in model.state_dict().keys()
]
)
# check reload full model
model_rec = VQVAE.load_from_folder(os.path.join(checkpoint_dir))
assert all(
[
torch.equal(
model_rec.state_dict()[key].cpu(), model.state_dict()[key].cpu()
)
for key in model.state_dict().keys()
]
)
assert type(model_rec.encoder.cpu()) == type(model.encoder.cpu())
assert type(model_rec.decoder.cpu()) == type(model.decoder.cpu())
optim_rec_state_dict = torch.load(os.path.join(checkpoint_dir, "optimizer.pt"))
assert all(
[
dict_rec == dict_optimizer
for (dict_rec, dict_optimizer) in zip(
optim_rec_state_dict["param_groups"],
optimizer.state_dict()["param_groups"],
)
]
)
assert all(
[
dict_rec == dict_optimizer
for (dict_rec, dict_optimizer) in zip(
optim_rec_state_dict["state"], optimizer.state_dict()["state"]
)
]
)
def test_checkpoint_saving_during_training(
self, tmpdir, vae, train_dataset, training_configs, optimizers
):
#
target_saving_epoch = training_configs.steps_saving
dir_path = training_configs.output_dir
trainer = BaseTrainer(
model=vae,
train_dataset=train_dataset,
training_config=training_configs,
optimizer=optimizers,
)
model = deepcopy(trainer.model)
trainer.train()
training_dir = os.path.join(
dir_path, f"VQVAE_training_{trainer._training_signature}"
)
assert os.path.isdir(training_dir)
checkpoint_dir = os.path.join(
training_dir, f"checkpoint_epoch_{target_saving_epoch}"
)
assert os.path.isdir(checkpoint_dir)
files_list = os.listdir(checkpoint_dir)
# check files
assert set(["model.pt", "optimizer.pt", "training_config.json"]).issubset(
set(files_list)
)
# check pickled custom decoder
if not vae.model_config.uses_default_decoder:
assert "decoder.pkl" in files_list
else:
assert not "decoder.pkl" in files_list
# check pickled custom encoder
if not vae.model_config.uses_default_encoder:
assert "encoder.pkl" in files_list
else:
assert not "encoder.pkl" in files_list
model_rec_state_dict = torch.load(os.path.join(checkpoint_dir, "model.pt"))[
"model_state_dict"
]
assert not all(
[
torch.equal(model_rec_state_dict[key], model.state_dict()[key])
for key in model.state_dict().keys()
]
)
def test_final_model_saving(
self, tmpdir, vae, train_dataset, training_configs, optimizers
):
dir_path = training_configs.output_dir
trainer = BaseTrainer(
model=vae,
train_dataset=train_dataset,
training_config=training_configs,
optimizer=optimizers,
)
trainer.train()
model = deepcopy(trainer._best_model)
training_dir = os.path.join(
dir_path, f"VQVAE_training_{trainer._training_signature}"
)
assert os.path.isdir(training_dir)
final_dir = os.path.join(training_dir, f"final_model")
assert os.path.isdir(final_dir)
files_list = os.listdir(final_dir)
assert set(["model.pt", "model_config.json", "training_config.json"]).issubset(
set(files_list)
)
# check pickled custom decoder
if not vae.model_config.uses_default_decoder:
assert "decoder.pkl" in files_list
else:
assert not "decoder.pkl" in files_list
# check pickled custom encoder
if not vae.model_config.uses_default_encoder:
assert "encoder.pkl" in files_list
else:
assert not "encoder.pkl" in files_list
# check reload full model
model_rec = VQVAE.load_from_folder(os.path.join(final_dir))
assert all(
[
torch.equal(
model_rec.state_dict()[key].cpu(), model.state_dict()[key].cpu()
)
for key in model.state_dict().keys()
]
)
assert type(model_rec.encoder.cpu()) == type(model.encoder.cpu())
assert type(model_rec.decoder.cpu()) == type(model.decoder.cpu())
def test_vae_training_pipeline(self, tmpdir, vae, train_dataset, training_configs):
dir_path = training_configs.output_dir
# build pipeline
pipeline = TrainingPipeline(model=vae, training_config=training_configs)
# Launch Pipeline
pipeline(
train_data=train_dataset.data, # gives tensor to pipeline
eval_data=train_dataset.data, # gives tensor to pipeline
)
model = deepcopy(pipeline.trainer._best_model)
training_dir = os.path.join(
dir_path, f"VQVAE_training_{pipeline.trainer._training_signature}"
)
assert os.path.isdir(training_dir)
final_dir = os.path.join(training_dir, f"final_model")
assert os.path.isdir(final_dir)
files_list = os.listdir(final_dir)
assert set(["model.pt", "model_config.json", "training_config.json"]).issubset(
set(files_list)
)
# check pickled custom decoder
if not vae.model_config.uses_default_decoder:
assert "decoder.pkl" in files_list
else:
assert not "decoder.pkl" in files_list
# check pickled custom encoder
if not vae.model_config.uses_default_encoder:
assert "encoder.pkl" in files_list
else:
assert not "encoder.pkl" in files_list
# check reload full model
model_rec = VQVAE.load_from_folder(os.path.join(final_dir))
assert all(
[
torch.equal(
model_rec.state_dict()[key].cpu(), model.state_dict()[key].cpu()
)
for key in model.state_dict().keys()
]
)
assert type(model_rec.encoder.cpu()) == type(model.encoder.cpu())
assert type(model_rec.decoder.cpu()) == type(model.decoder.cpu())
| [
"torch.rand",
"torch.equal"
] | 1.9.0 | clementchadebec/benchmark_VAE | 943e231f9e5dfa40b4eec14d4536f1c229ad9be1 |
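A small sketch, not part of the pythae test file above, of the state_dict comparison idiom these tests repeat; state_dicts_equal is a hypothetical helper, not a pythae API.

import torch
import torch.nn as nn

def state_dicts_equal(sd_a, sd_b):
    return sd_a.keys() == sd_b.keys() and all(
        torch.equal(sd_a[k].cpu(), sd_b[k].cpu()) for k in sd_a
    )

a, b = nn.Linear(4, 2), nn.Linear(4, 2)
b.load_state_dict(a.state_dict())
print(state_dicts_equal(a.state_dict(), b.state_dict()))  # True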
1.7 | import torch
import torch.nn as nn
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from typing import Any
__all__ = ['AlexNet', 'alexnet']
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
class AlexNet(nn.Module):
'''
AlexNet modified (features) for CIFAR10. Source: https://github.com/icpm/pytorch-cifar10/blob/master/models/AlexNet.py.
'''
def __init__(self, num_classes: int = 1000, use_dropout=False) -> None:
super(AlexNet, self).__init__()
self.use_dropout = use_dropout
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(64, 192, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
)
# self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.fc_block = nn.Sequential(
nn.Linear(256 * 2 * 2, 4096, bias=False),
nn.BatchNorm1d(4096),
nn.ReLU(inplace=True),
nn.Linear(4096, 4096, bias=False),
nn.BatchNorm1d(4096),
nn.ReLU(inplace=True),
)
self.classifier = nn.Sequential(
nn.Linear(4096, num_classes),
)
self.penultimate_active = False
self.drop = nn.Dropout(p=0.5)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
# x = self.avgpool(x)
z = torch.flatten(x, 1)
if self.use_dropout:
            z = self.drop(z)
z = self.fc_block(z)
x = self.classifier(z)
if self.penultimate_active:
return z, x
return x
def alexnet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> AlexNet:
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = AlexNet(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['alexnet'],
progress=progress)
model.load_state_dict(state_dict)
return model | [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.utils.model_zoo.load_url",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.flatten"
] | 1.7.1 | acl21/deep-active-learning-pytorch | 637fd507235632903bcf84ed841ff524d847b94e |
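An illustrative check, not from the repository above, of where the 256 * 2 * 2 input size of the first Linear layer comes from for a 32x32 CIFAR-10 image; the Sequential below mirrors the features block of the AlexNet class shown above.

import torch
import torch.nn as nn

features = nn.Sequential(
    nn.Conv2d(3, 64, 3, stride=2, padding=1), nn.ReLU(), nn.MaxPool2d(2),   # 32 -> 16 -> 8
    nn.Conv2d(64, 192, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),           # 8 -> 4
    nn.Conv2d(192, 384, 3, padding=1), nn.ReLU(),
    nn.Conv2d(384, 256, 3, padding=1), nn.ReLU(),
    nn.Conv2d(256, 256, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),          # 4 -> 2
)
print(features(torch.zeros(1, 3, 32, 32)).shape)  # torch.Size([1, 256, 2, 2])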
1.6 | import numpy as np
import scipy.stats
import torch as t
from torch.distributions import Normal, Poisson
import math
from .utils import betaln, binomln, kappa_marginal_moments
# flat gaussian mixture
class GaussianMixture:
def __init__(self, mus, sigs):
"""
Args:
- mus: (t.tensor) vector of component means [shape (n_particles,)]
- sigs: (t.tensor) vector of component variances [shape (n_particles,)]
"""
self._kernel = Normal(loc=mus, scale=sigs)
self._n_particles = len(mus)
def log_prob(self, lam, keepdims=False):
"""
Log probability of scalar poisson rate under
the Gaussian Mixture.
Args:
- lam: (scalar/size 0 tensor) Poisson rate
"""
if type(lam) == float:
lam = t.tensor([lam])
elif len(lam.size()) == 0:
lam = lam.view(1)
log_p = t.logsumexp(
self._kernel.log_prob(lam[:, None]), dim=1, keepdims=keepdims
)
normalize = math.log(self._n_particles)
return log_p - normalize
def sample(self, n_samples):
return self._kernel.sample(n_samples)
class PoissonMixture:
def __init__(self, rates):
"""
Args:
            - rates: (t.tensor) vector of component Poisson rates [shape (n_particles,)]
"""
self._kernel = Poisson(rates)
self._n_particles = len(rates)
self.support = (0, np.inf)
def log_prob(self, x, keepdims=False):
"""
Log probability of scalar count under
the Poisson Mixture.
Args:
- x: (scalar/size 0 tensor) count
"""
if type(x) == float:
x = t.tensor([x])
elif len(x.size()) == 0:
x = x.view(1)
log_p = t.logsumexp(self._kernel.log_prob(x), dim=1, keepdims=keepdims)
normalize = math.log(self._n_particles)
return log_p - normalize
def sample(self, n_samples):
return self._kernel.sample(n_samples)
class LaggedBetaBinomial:
def __init__(self, ys, alphas, betas, prior):
"""
beta-binomial emission distribution
Args:
- ys: (t.tensor) [shape (n_lag_steps,)]
- alphas (t.tensor) first beta shape parameters for thinning prior [shape (n_lag_steps,)]
        - betas (t.tensor) second beta shape parameters for thinning prior [shape (n_lag_steps,)]
        - prior (distribution) prior distribution on the true count. Must implement log_prob
"""
# support of the true count for latent marginalization
try:
lower = prior.support[0]
except:
lower = prior.support.lower_bound
self._support_lower = max(lower, int(ys.max()))
self._ys = ys
self._alphas = alphas
self._betas = betas
self._prior = prior
self._left = self._ys + self._alphas # (n_lag_steps,)
self._right = betas - ys # (n_lag_steps,)
self._log_denom = betaln(alphas, betas) # (n_lag_steps,)
def log_prob(self, x, support_check=True):
if support_check:
if x < self._support_lower:
return t.tensor([-np.inf])
if type(x) == float:
x = t.tensor([x])
elif len(x.size()) == 0:
x = x.view(1)
right = x[None, :] + self._right[:, None] # (n_lag_steps, x_dim)
log_num = betaln(self._left[:, None], right) # (n_lag_steps, x_dim)
log_binom = binomln(x[None, :], self._ys[:, None]) # (n_lag_steps, x_dim)
log_prior = self._prior.log_prob(x[:, None]) # [:, None]) # (x_dim, prior_dim)
log_beta_binom = (
log_binom[:, :, None]
+ log_num[:, :, None]
- self._log_denom[:, None, None]
+ log_prior[None, :]
) # (n_lag_steps, x_dim, prior_dim)
return log_beta_binom.sum(dim=0) # (x_dim, prior_dim)
def log_marginal(self, support_max=100):
try:
            upper = self._prior.support[1]
except:
upper = support_max
alpha_f, beta_f = self._alphas[-1], self._betas[-1]
mu = alpha_f / (alpha_f + beta_f) # 0.5, var = 0.5
sig = np.sqrt(
alpha_f * beta_f / ((alpha_f + beta_f) ** 2 * (alpha_f + beta_f + 1))
)
xs = t.arange(self._support_lower, support_max).float()
return self.log_prob(xs, support_check=False).logsumexp(dim=0) # (prior_dim,)
class PoissonBetaBinomial:
def __init__(self, ys, alphas, betas, prior_x, prior_lam):
"""
Args:
- ys: (t.tensor) vector of reported counts [shape (n_lag_steps,)]
- alphas: (t.tensor) vector of beta prior shape parameters [shape (n_lag_steps,)]
- betas: (t.tensor) vector of beta prior shape parameters [shape (n_lag_steps,)]
- prior_x: (distribution) prior distribution on true count [must implement log_prob]
- prior_lam: (distribution) prior distribution on poisson rate [must implement log_prob]
"""
# support of the true count for latent marginalization
support_lower = max(prior_x.support[0], int(ys.max()))
self._xs = t.arange(support_lower, prior_x.support[1]).float()
# set prior
self._prior_lam = prior_lam
self._log_beta_binom = self._beta_binom(ys, alphas, betas, prior_x)
self._poisson_log_norm = t.lgamma(self._xs + 1)
# utils for sampler
self.is_continuous = True
self.support = (0, np.inf)
def log_prob(self, lam, support_check=True):
"""
log-probability of scalar poisson rate under
the Poisson beta-binomial emission model.
Args:
- lam: (float or 0-dim tensor) poisson rate
"""
support_lower, support_upper = self.support
if support_check:
if not self._support_check(lam):
return -np.inf
if type(lam) == float:
lam = t.tensor([lam])
elif len(lam.size()) == 0:
lam = lam.view(1)
log_poisson = (
self._xs[:, None] * np.log(lam)[None, :]
- lam[None, :]
- self._poisson_log_norm[:, None]
) # (x_dim, lam_dim)
log_series = (log_poisson[None, :] + self._log_beta_binom[:, :, None]).sum(
axis=0
) # (x_dim, lam_dim)
if self._prior_lam is None:
log_prior_lam = 0.
else:
log_prior_lam = self._prior_lam.log_prob(lam) # (lam_dim)
log_prob_lam = t.logsumexp(log_series, dim=0) + log_prior_lam # (lam_dim)
return log_prob_lam
def _beta_binom(self, ys, alphas, betas, prior_x):
"""
beta-binomial emission distribution
Args:
- ys: (t.tensor) [shape (n_lag_steps,)]
- alphas (t.tensor) first beta shape parameters for thinning prior [shape (n_lag_steps,)]
            - betas (t.tensor) second beta shape parameters for thinning prior [shape (n_lag_steps,)]
- prior_x (distribution) prior distribution on the true count. Must implement log_prob
"""
xs = self._xs
left = (ys + alphas)[:, None] # (n_lag_steps, 1)
right = xs - ys[:, None] + betas[:, None] # (n_lag_steps, x_dim)
log_num = betaln(left, right) # (n_lag_steps, x_dim)
log_binom = binomln(xs[None, :], ys[:, None]) # (n_lag_steps, x_dim)
log_denom = betaln(alphas, betas)[:, None] # (n_lag_steps, 1)
log_prior_x = prior_x.log_prob(xs) # (x_dim)
log_beta_binom = (
log_binom + log_num - log_denom + log_prior_x
) # (n_lag_steps, x_dim)
return log_beta_binom
def _support_check(self, lam):
return self.support[0] <= lam <= self.support[1]
class CountSmoothingDistribution:
def __init__(self, ys, a, b, lambda_smoothing_particles):
prior = Poisson(lambda_smoothing_particles)
self._emission = LaggedBetaBinomial(ys, a, b, prior)
_n_particles = len(lambda_smoothing_particles)
self._log_normalizer = math.log(_n_particles)
def log_prob(self, x):
weights = self._emission.log_prob(
x, support_check=False
) - self._emission.log_marginal(support_max=x.max())
lp = t.logsumexp(weights, dim=1) - self._log_normalizer
return lp
class AdditiveDriftDistribution:
def __init__(self, kappa_sigma, prior_particles, emission_dist):
self._prior_lambdas = prior_particles[:, 0].squeeze().numpy()
self._prior_kappas = prior_particles[:, 1].squeeze().numpy()
self._kappa_sigma = kappa_sigma
self.y_likelihood = emission_dist
def sample(self, length_scale, burn_in=100, thin=100, n_samples=500):
n_steps = n_samples * thin + burn_in
kappa_proposal_dist = Normal(0, scale=length_scale)
deltas = kappa_proposal_dist.sample([n_steps]).squeeze()
# init
kap = np.mean(self._prior_kappas)
lam_idx = np.random.choice(np.arange(len(self._prior_lambdas)))
lam = (t.tensor([self._prior_lambdas[lam_idx]]) + kap).abs()
ll = self.y_likelihood.log_prob(lam) + Normal(
self._prior_kappas[lam_idx], self._kappa_sigma
).log_prob(kap)
samples = []
for i, delta in enumerate(deltas):
# sampler_pbar.update()
kap_p = kap + delta
lam_idx = np.random.choice(np.arange(len(self._prior_lambdas)))
lam_p = kap_p + self._prior_lambdas[lam_idx]
weight = sum(self._prior_lambdas == self._prior_lambdas[lam_idx]).item()
# component likelihood
lam_p_ll = self.y_likelihood.log_prob(lam_p)
kap_p_ll = Normal(self._prior_kappas[lam_idx], self._kappa_sigma).log_prob(
kap_p
)
p_ll = lam_p_ll + kap_p_ll + np.log(weight)
log_prob_accept = p_ll - ll
if log_prob_accept > 0:
accept = True
else:
p = t.exp(log_prob_accept).item()
accept = np.random.choice([True, False], p=[p, 1 - p])
if accept:
kap = kap_p
lam = lam_p
ll = p_ll
samples.append(t.tensor([lam, kap]))
return t.stack(samples[burn_in:][::thin])
class DriftSmoothingDistribution:
def __init__(
self,
lambda_filtering_particles,
lambda_smoothing_particles,
prior_kappa_loc,
prior_kappa_scale,
random_walk_scale,
):
# required for kappa log probability
self._filtering = lambda_filtering_particles
self._smoothing = lambda_smoothing_particles
self._rw_scale = random_walk_scale
self._prior_kappa = Normal(loc=prior_kappa_loc, scale=prior_kappa_scale)
# Marginal normalizer is a gaussian mixture
mixture_locs, mixture_scales = kappa_marginal_moments(
prior_kappa_loc,
prior_kappa_scale,
random_walk_scale,
lambda_filtering_particles,
)
normalizer = GaussianMixture(mixture_locs, mixture_scales)
self._row_norm = normalizer.log_prob(lambda_smoothing_particles, keepdims=True)
def log_prob(self, kappa):
# prior probability of kappa
log_prior = self._prior_kappa.log_prob(kappa)
# likelihood function for kappa marginalized over the filtering and smoothing distributions
transition_log_proba = self.particle_transition_matrix(kappa)
marginal_log_likelihood = t.logsumexp(
transition_log_proba - self._row_norm, dim=(0, 1)
)
# smoothing probability for kappa
particle_norm = math.log(self._smoothing.shape[0]) + math.log(
self._filtering.shape[0]
)
lp = log_prior + marginal_log_likelihood - particle_norm
return lp
def particle_transition_matrix(self, kappa):
tm_loc = kappa * self._filtering
tm_scale = self._rw_scale
transition_dist = Normal(loc=tm_loc, scale=tm_scale)
transition_log_prob_matrix = transition_dist.log_prob(self._smoothing[:, None])
return transition_log_prob_matrix
# This lets me sample lam
class CorrectedPoissonBetaBinomial:
def __init__(
self, ys, alphas, betas, prior_x, prior_lam, prior_correction, multidim=False
):
self._pbb = PoissonBetaBinomial(ys, alphas, betas, prior_x, prior_lam=None)
self._prior_lam = prior_lam
self._prior_correction = prior_correction
self._multidim = multidim
def log_prob(self, lam, support_check=True):
if not self._multidim:
if support_check:
if lam < 0:
return -np.inf
# LAM MUST BE SCALAR HERE
effective_lam = lam * self._prior_correction.values
if self._prior_lam is None:
prior_lam_term = 0.
else:
prior_lam_term = self._prior_lam.log_prob(lam) # (lam_dim)
lp = t.logsumexp(
self._pbb.log_prob(effective_lam, support_check=False)
+ self._prior_correction.log_probas,
axis=0,
)
lp = lp + prior_lam_term
else:
effective_lam = (
lam[:, None] * self._prior_correction.values[None, :]
) # (lam_dim, z_dim)
if self._prior_lam is None:
prior_lam_term = 0.
else:
prior_lam_term = self._prior_lam.log_prob(lam) # (lam_dim)
pbb_proba = self._pbb.log_prob(effective_lam.view(-1), support_check=False)
lp = t.logsumexp(
pbb_proba.view(effective_lam.shape) + self._prior_correction.log_probas,
axis=1,
)
lp = lp + prior_lam_term
return lp
class DiscreteDistribution:
def __init__(self, values, log_probas):
self.values = values
self.log_probas = log_probas
class EmpiricalDistribution:
def __init__(self, support, probas):
"""
Args:
- support: (tuple) edges of support. Support assumed to exist on all integers between. [shape (2,)]
- probas: (t.tensor) probabilities for each element of support. [shape (support[1] - support[0],)]
"""
self.support = support
self.probas = probas
self._xs = t.arange(support[0], support[1]).float()
def log_prob(self, x):
return self.probas[x.int() - self.support[0]]
def sample(self, size):
idxs = np.arange(0, len(self._xs))
sample_idxs = np.random.choice(idxs, p=self.probas, size=size)
samples = self._xs[sample_idxs]
return samples
| [
"torch.stack",
"torch.arange",
"torch.distributions.Normal",
"torch.logsumexp",
"torch.tensor",
"torch.distributions.Poisson",
"torch.lgamma",
"torch.exp"
] | 1.6 | anguswilliams91/jbc-turing-rss-nowcasting | 8c91e568dcf0dfcdf48e03cac86ad01bc47f8dcc |
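# --- Editor's note: hedged numerical check (not from the original repo) of the flat
# GaussianMixture above: its log_prob is logsumexp over the component log-densities
# minus log(n_particles), i.e. the log of the average component density.
import torch as t
from torch.distributions import Normal

mus = t.tensor([1.0, 2.0, 3.0])
sigs = t.tensor([0.5, 0.5, 0.5])
gm = GaussianMixture(mus, sigs)  # assumes the class defined above is in scope
lam = t.tensor(2.2)

lp = gm.log_prob(lam)
expected = t.log(Normal(mus, sigs).log_prob(lam.view(1)).exp().mean())
print(lp.item(), expected.item())  # the two values should agree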
1.1 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.box_utils import match, log_sum_exp
class MultiBoxLoss(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap,
encode_target):
super(MultiBoxLoss, self).__init__()
self.num_classes = num_classes
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.do_neg_mining = neg_mining
self.negpos_ratio = neg_pos
self.neg_overlap = neg_overlap
self.variance = [0.1, 0.2]
def forward(self, predictions, priors, targets):
"""Multibox Loss
Args:
            predictions (tuple): A tuple containing loc preds and conf preds
                from the SSD net.
                conf shape: torch.size(batch_size,num_priors,num_classes)
                loc shape: torch.size(batch_size,num_priors,4)
            priors (tensor): Prior (default) boxes,
                shape: torch.size(num_priors,4)
            targets (tensor): Ground truth boxes and labels for a batch,
                shape: [batch_size,num_objs,5] (last idx is the label).
"""
loc_data, conf_data = predictions
priors = priors
num = loc_data.size(0)
num_priors = (priors.size(0))
num_classes = self.num_classes
# match priors (default boxes) and ground truth boxes
loc_t = torch.zeros((num, num_priors, 4), dtype=torch.float32)
conf_t = torch.zeros((num, num_priors), dtype=torch.int64)
for idx in range(num):
truths = targets[idx][:, :-1].data
labels = targets[idx][:, -1].data
defaults = priors.data
match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, idx)
if torch.cuda.is_available():
loc_t = loc_t.cuda()
conf_t = conf_t.cuda()
pos = conf_t > 0
# Localization Loss (Smooth L1)
# Shape: [batch,num_priors,4]
pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
loc_p = loc_data[pos_idx].view(-1, 4)
loc_t = loc_t[pos_idx].view(-1, 4)
loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum')
# Compute max conf across batch for hard negative mining
batch_conf = conf_data.view(-1, self.num_classes)
loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
# Hard Negative Mining
loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now
loss_c = loss_c.view(num, -1)
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = pos.unsqueeze(2).expand_as(conf_data)
neg_idx = neg.unsqueeze(2).expand_as(conf_data)
conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)
targets_weighted = conf_t[(pos + neg).gt(0)]
loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum')
# Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
N = max(num_pos.data.sum().float(), 1)
loss_l /= N
loss_c /= N
return loss_l, loss_c
| [
"torch.zeros",
"torch.nn.functional.cross_entropy",
"torch.cuda.is_available",
"torch.nn.functional.smooth_l1_loss"
] | 1.1.0 | ka-ryo/M2Det | d947f135e7aad996da43f5fe3a350eeead237fd0 |
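# --- Editor's note: standalone sketch (not from the original repo) of the double-argsort
# trick MultiBoxLoss uses for hard negative mining: sorting the per-prior loss descending
# and then ranking the sort indices gives each prior's rank among the negatives, and only
# the negpos_ratio * num_pos hardest ones are kept (in the real code the positive priors'
# losses are zeroed out first).
import torch

loss_c = torch.tensor([[0.1, 0.9, 0.3, 0.7]])  # per-prior conf loss, 1 image x 4 priors
num_pos = torch.tensor([[1]])                  # one matched (positive) prior
_, loss_idx = loss_c.sort(1, descending=True)  # prior indices ordered by loss
_, idx_rank = loss_idx.sort(1)                 # rank of each prior within that order
num_neg = torch.clamp(3 * num_pos, max=loss_c.size(1) - 1)
neg = idx_rank < num_neg.expand_as(idx_rank)
print(neg)  # tensor([[False,  True,  True,  True]]) -> the three hardest negatives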
1.2 | # coding: utf-8
"""
Training module
"""
from itertools import count
import argparse
import time
import shutil
from typing import List, Dict
import os
from os.path import join
import queue
from functools import partial
import random
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from torchtext.data import Dataset
from entmax import Entmax15Loss, SparsemaxLoss, EntmaxBisectLoss
from joeynmt.model import build_model
from joeynmt.batch import Batch
from joeynmt.helpers import log_data_info, load_config, log_cfg, \
store_attention_plots, load_checkpoint, make_model_dir, \
make_logger, set_seed, symlink_update, ConfigurationError, postprocess
from joeynmt.model import Model
from joeynmt.prediction import validate_on_data
from joeynmt.loss import LabelSmoothingLoss, FYLabelSmoothingLoss
from joeynmt.data import load_data, make_data_iter
from joeynmt.builders import build_optimizer, build_scheduler, \
build_gradient_clipper
from joeynmt.prediction import test
def _parse_hidden_size(model_config):
if "encoder" in model_config:
return model_config["encoder"]["hidden_size"]
encs_config = model_config["encoders"]
if "encoder" in encs_config:
return encs_config["encoder"]["hidden_size"]
return next(cf["hidden_size"] for cf in encs_config.values()
if "hidden_size" in cf)
# pylint: disable=too-many-instance-attributes
class TrainManager:
""" Manages training loop, validations, learning rate scheduling
and early stopping."""
def __init__(self, model: Model, config: dict) -> None:
"""
Creates a new TrainManager for a model, specified as in configuration.
:param model: torch module defining the model
:param config: dictionary containing the training configurations
"""
train_config = config["training"]
# files for logging and storing
self.model_dir = train_config["model_dir"]
make_model_dir(
self.model_dir, overwrite=train_config.get("overwrite", False)
)
self.logger = make_logger(model_dir=self.model_dir)
self.logging_freq = train_config.get("logging_freq", 100)
self.valid_report_file = join(self.model_dir, "validations.txt")
self.tb_writer = SummaryWriter(
log_dir=join(self.model_dir, "tensorboard/")
)
# model
self.model = model
self.pad_index = self.model.pad_index
self._log_parameters_list()
# objective
objective = train_config.get("loss", "cross_entropy")
loss_alpha = train_config.get("loss_alpha", 1.5)
assert loss_alpha >= 1
# maybe don't do the label smoothing thing here, instead have
# nn.CrossEntropyLoss
# then you look up the loss func, and you either use it directly or
# wrap it in FYLabelSmoothingLoss
if objective == "softmax":
objective = "cross_entropy"
loss_funcs = {
"cross_entropy": nn.CrossEntropyLoss,
"entmax15": partial(Entmax15Loss, k=512),
"sparsemax": partial(SparsemaxLoss, k=512),
"entmax": partial(EntmaxBisectLoss, alpha=loss_alpha, n_iter=30)
}
if objective not in loss_funcs:
raise ConfigurationError("Unknown loss function")
loss_module = loss_funcs[objective]
loss_func = loss_module(ignore_index=self.pad_index, reduction='sum')
label_smoothing = train_config.get("label_smoothing", 0.0)
label_smoothing_type = train_config.get("label_smoothing_type", "fy")
assert label_smoothing_type in ["fy", "szegedy"]
smooth_dist = train_config.get("smoothing_distribution", "uniform")
assert smooth_dist in ["uniform", "unigram"]
if label_smoothing > 0:
if label_smoothing_type == "fy":
# label smoothing entmax loss
if smooth_dist is not None:
smooth_p = torch.FloatTensor(model.trg_vocab.frequencies)
smooth_p /= smooth_p.sum()
else:
smooth_p = None
loss_func = FYLabelSmoothingLoss(
loss_func, smoothing=label_smoothing, smooth_p=smooth_p
)
else:
assert objective == "cross_entropy"
loss_func = LabelSmoothingLoss(
ignore_index=self.pad_index,
reduction="sum",
smoothing=label_smoothing
)
self.loss = loss_func
self.norm_type = train_config.get("normalization", "batch")
if self.norm_type not in ["batch", "tokens"]:
raise ConfigurationError("Invalid normalization. "
"Valid options: 'batch', 'tokens'.")
# optimization
self.learning_rate_min = train_config.get("learning_rate_min", 1.0e-8)
self.clip_grad_fun = build_gradient_clipper(config=train_config)
self.optimizer = build_optimizer(
config=train_config, parameters=model.parameters())
# validation & early stopping
self.validate_by_label = train_config.get("validate_by_label", False)
self.validation_freq = train_config.get("validation_freq", 1000)
self.log_valid_sents = train_config.get("print_valid_sents", [0, 1, 2])
self.plot_attention = train_config.get("plot_attention", False)
self.ckpt_queue = queue.Queue(
maxsize=train_config.get("keep_last_ckpts", 5))
allowed = {'bleu', 'chrf', 'token_accuracy',
'sequence_accuracy', 'cer', "wer", "levenshtein_distance"}
eval_metrics = train_config.get("eval_metric", "bleu")
if isinstance(eval_metrics, str):
eval_metrics = [eval_metrics]
if any(metric not in allowed for metric in eval_metrics):
ok_metrics = " ".join(allowed)
raise ConfigurationError("Invalid setting for 'eval_metric', "
"valid options: {}".format(ok_metrics))
self.eval_metrics = eval_metrics
self.forced_sparsity = train_config.get("forced_sparsity", False)
early_stop_metric = train_config.get("early_stopping_metric", "loss")
allowed_early_stop = {"ppl", "loss"} | set(self.eval_metrics)
if early_stop_metric not in allowed_early_stop:
raise ConfigurationError(
"Invalid setting for 'early_stopping_metric', "
"valid options: 'loss', 'ppl', and eval_metrics.")
self.early_stopping_metric = early_stop_metric
min_metrics = {"ppl", "loss", "cer", "wer", "levenshtein_distance"}
self.minimize_metric = early_stop_metric in min_metrics
# learning rate scheduling
hidden_size = _parse_hidden_size(config["model"])
self.scheduler, self.sched_incr = build_scheduler(
config=train_config,
scheduler_mode="min" if self.minimize_metric else "max",
optimizer=self.optimizer,
hidden_size=hidden_size)
# data & batch handling
# src/trg magic
if "level" in config["data"]:
self.src_level = self.trg_level = config["data"]["level"]
else:
assert "src_level" in config["data"]
assert "trg_level" in config["data"]
self.src_level = config["data"]["src_level"]
self.trg_level = config["data"]["trg_level"]
self.shuffle = train_config.get("shuffle", True)
self.epochs = train_config["epochs"]
self.batch_size = train_config["batch_size"]
self.batch_type = train_config.get("batch_type", "sentence")
self.eval_batch_size = train_config.get("eval_batch_size",
self.batch_size)
self.eval_batch_type = train_config.get("eval_batch_type",
self.batch_type)
self.batch_multiplier = train_config.get("batch_multiplier", 1)
# generation
self.max_output_length = train_config.get("max_output_length", None)
# CPU / GPU
self.use_cuda = train_config["use_cuda"]
if self.use_cuda:
self.model.cuda()
self.loss.cuda()
# initialize training statistics
self.steps = 0
# stop training if this flag is True by reaching learning rate minimum
self.stop = False
self.total_tokens = 0
self.best_ckpt_iteration = 0
# initial values for best scores
self.best_ckpt_score = np.inf if self.minimize_metric else -np.inf
mrt_schedule = train_config.get("mrt_schedule", None)
assert mrt_schedule is None or mrt_schedule in ["warmup", "mix", "mtl"]
self.mrt_schedule = mrt_schedule
self.mrt_p = train_config.get("mrt_p", 0.0)
self.mrt_lambda = train_config.get("mrt_lambda", 1.0)
assert 0 <= self.mrt_p <= 1
assert 0 <= self.mrt_lambda <= 1
self.mrt_start_steps = train_config.get("mrt_start_steps", 0)
self.mrt_samples = train_config.get("mrt_samples", 1)
self.mrt_alpha = train_config.get("mrt_alpha", 1.0)
self.mrt_strategy = train_config.get("mrt_strategy", "sample")
self.mrt_cost = train_config.get("mrt_cost", "levenshtein")
self.mrt_max_len = train_config.get("mrt_max_len", 31) # hmm
self.step_counter = count()
assert self.mrt_alpha > 0
assert self.mrt_strategy in ["sample", "topk"]
assert self.mrt_cost in ["levenshtein", "bleu"]
# model parameters
if "load_model" in train_config.keys():
model_load_path = train_config["load_model"]
reset_training = train_config.get("reset_training", False)
self.logger.info("Loading model from %s", model_load_path)
self.init_from_checkpoint(model_load_path, reset=reset_training)
def is_best(self, score):
return self.minimize_metric == (score < self.best_ckpt_score)
def _save_checkpoint(self) -> None:
"""
Save the model's current parameters and the training state to a
checkpoint.
The training state contains the total number of training steps,
the total number of training tokens,
the best checkpoint score and iteration so far,
and optimizer and scheduler states.
"""
ckpt_name = str(self.steps) + ".ckpt"
model_path = join(self.model_dir, ckpt_name)
if self.scheduler is not None:
scheduler_state = self.scheduler.state_dict()
else:
scheduler_state = None
state = {
"steps": self.steps,
"total_tokens": self.total_tokens,
"best_ckpt_score": self.best_ckpt_score,
"best_ckpt_iteration": self.best_ckpt_iteration,
"model_state": self.model.state_dict(),
"optimizer_state": self.optimizer.state_dict(),
"scheduler_state": scheduler_state
}
torch.save(state, model_path)
if self.ckpt_queue.full():
to_delete = self.ckpt_queue.get() # delete oldest ckpt
try:
os.remove(to_delete)
except FileNotFoundError:
self.logger.warning("Wanted to delete old checkpoint %s but "
"file does not exist.", to_delete)
self.ckpt_queue.put(model_path)
# create/modify symbolic link for best checkpoint
symlink_update(ckpt_name, join(self.model_dir, "best.ckpt"))
def init_from_checkpoint(self, path: str, reset: bool = False):
"""
Initialize the trainer from a given checkpoint file.
This checkpoint file contains not only model parameters, but also
scheduler and optimizer states, see `self._save_checkpoint`.
:param path: path to checkpoint
"""
model_checkpoint = load_checkpoint(path=path, use_cuda=self.use_cuda)
# restore model and optimizer parameters
self.model.load_state_dict(model_checkpoint["model_state"])
if not reset:
self.optimizer.load_state_dict(model_checkpoint["optimizer_state"])
scheduler_state = model_checkpoint["scheduler_state"]
if scheduler_state is not None and self.scheduler is not None:
self.scheduler.load_state_dict(scheduler_state)
# restore counts
self.steps = model_checkpoint["steps"]
self.total_tokens = model_checkpoint["total_tokens"]
self.best_ckpt_score = model_checkpoint["best_ckpt_score"]
self.best_ckpt_iteration = model_checkpoint["best_ckpt_iteration"]
# move parameters to cuda
if self.use_cuda:
self.model.cuda()
def log_tensorboard(self, split, **kwargs):
"""
split: "train" or "valid"
"""
assert split in ["train", "valid"]
prefix = "{0}/{0}_".format(split)
for metric, value in kwargs.items():
name = prefix + metric
self.tb_writer.add_scalar(name, value, self.steps)
def train_and_validate(self, train_data: Dataset, valid_data: Dataset):
"""
Train the model and validate it on the validation set.
:param train_data: training data
:param valid_data: validation data
"""
train_iter = make_data_iter(
train_data,
batch_size=self.batch_size,
batch_type=self.batch_type,
train=True,
shuffle=self.shuffle,
use_cuda=self.use_cuda
)
for epoch_no in range(1, self.epochs + 1):
self.logger.info("EPOCH %d", epoch_no)
if self.sched_incr == "epoch":
self.scheduler.step(epoch=epoch_no - 1) # 0-based indexing
self.model.train()
start = time.time()
total_valid_duration = 0
processed_tokens = self.total_tokens
epoch_loss = 0
for i, batch in enumerate(iter(train_iter), 1):
# reactivate training
self.model.train()
# create a Batch object from torchtext batch
batch = Batch(batch, self.pad_index)
# only update every batch_multiplier batches
update = i % self.batch_multiplier == 0
batch_loss = self._train_batch(batch, update=update)
self.log_tensorboard("train", batch_loss=batch_loss)
epoch_loss += batch_loss.cpu().numpy()
if not update:
continue
if self.sched_incr == "step":
self.scheduler.step()
# log learning progress
if self.steps % self.logging_freq == 0:
elapsed = time.time() - start - total_valid_duration
elapsed_tokens = self.total_tokens - processed_tokens
self.logger.info(
"Epoch %3d Step: %8d Batch Loss: %12.6f "
"Tokens per Sec: %8.0f, Lr: %.6f",
epoch_no, self.steps, batch_loss,
elapsed_tokens / elapsed,
self.optimizer.param_groups[0]["lr"])
start = time.time()
total_valid_duration = 0
processed_tokens = self.total_tokens
# validate on the entire dev set
if self.steps % self.validation_freq == 0:
valid_start_time = time.time()
# it would be nice to include loss and ppl in valid_scores
valid_scores, valid_references, valid_hypotheses, \
valid_hypotheses_raw, valid_attention_scores, \
scores_by_label = validate_on_data(
batch_size=self.eval_batch_size,
data=valid_data,
eval_metrics=self.eval_metrics,
trg_level=self.trg_level,
model=self.model,
use_cuda=self.use_cuda,
max_output_length=self.max_output_length,
loss_function=self.loss,
beam_size=0, # greedy validations
batch_type=self.eval_batch_type,
save_attention=self.plot_attention,
validate_by_label=self.validate_by_label,
forced_sparsity=self.forced_sparsity
)
ckpt_score = valid_scores[self.early_stopping_metric]
self.log_tensorboard("valid", **valid_scores)
new_best = False
if self.is_best(ckpt_score):
self.best_ckpt_score = ckpt_score
self.best_ckpt_iteration = self.steps
self.logger.info(
'Hooray! New best validation result [%s]!',
self.early_stopping_metric)
if self.ckpt_queue.maxsize > 0:
self.logger.info("Saving new checkpoint.")
new_best = True
self._save_checkpoint()
if self.sched_incr == "validation":
self.scheduler.step(ckpt_score)
# append to validation report
self._add_report(valid_scores, new_best=new_best)
valid_sources_raw = {f: list(getattr(valid_data, f))
for f in valid_data.fields
if f != "trg"}
self._log_examples(
sources_raw=valid_sources_raw,
hypotheses_raw=valid_hypotheses_raw,
hypotheses=valid_hypotheses,
references=valid_references
)
labeled_scores = sorted(valid_scores.items())
eval_report = ", ".join("{}: {:.5f}".format(n, v)
for n, v in labeled_scores)
valid_duration = time.time() - valid_start_time
total_valid_duration += valid_duration
self.logger.info(
'Validation result at epoch %3d, step %8d: %s, '
'duration: %.4fs',
epoch_no, self.steps, eval_report, valid_duration)
if scores_by_label is not None:
for metric, scores in scores_by_label.items():
# make a report
label_report = [metric]
numbers = sorted(scores.items())
label_report.extend(
["{}={}: {:.5f}".format(l, n, v)
for (l, n), v in numbers]
)
self.logger.info("\n\t".join(label_report))
# store validation set outputs
self._store_outputs(valid_hypotheses)
# store attention plots for selected valid sentences
if valid_attention_scores and self.plot_attention:
store_attention_plots(
attentions=valid_attention_scores,
sources=list(valid_data.src),
targets=valid_hypotheses_raw,
indices=self.log_valid_sents,
model_dir=self.model_dir,
tb_writer=self.tb_writer,
steps=self.steps)
if self.stop:
break
if self.stop:
self.logger.info(
'Training ended because minimum lr %f was reached.',
self.learning_rate_min)
break
self.logger.info(
'Epoch %3d: total training loss %.2f', epoch_no, epoch_loss)
else:
self.logger.info('Training ended after %3d epochs.', epoch_no)
self.logger.info('Best validation result at step %8d: %6.2f %s.',
self.best_ckpt_iteration, self.best_ckpt_score,
self.early_stopping_metric)
self.tb_writer.close() # close Tensorboard writer
def _train_batch(self, batch: Batch, update: bool = True) -> Tensor:
"""
Train the model on one batch: Compute the loss, make a gradient step.
:param batch: training batch
:param update: if False, only store gradient. if True also make update
:return: loss for batch (sum)
"""
times_called = next(self.step_counter)
# when do you call get_risk_for batch?
mrt_schedule = self.mrt_schedule
mrt_steps = self.mrt_start_steps
warmed_up = mrt_schedule == "warmup" and times_called >= mrt_steps
mrt_drawn = mrt_schedule == "mix" and random.random() < self.mrt_p
if mrt_schedule == "mtl":
batch_loss = self.model.get_loss_and_risk_for_batch(
batch,
self.loss,
n_samples=self.mrt_samples,
alpha=self.mrt_alpha,
strategy=self.mrt_strategy,
max_len=self.mrt_max_len,
cost=self.mrt_cost,
level=self.trg_level,
mrt_lambda=self.mrt_lambda
)
if warmed_up or mrt_drawn:
batch_loss = self.mrt_lambda * self.model.get_risk_for_batch(
batch,
n_samples=self.mrt_samples,
alpha=self.mrt_alpha,
strategy=self.mrt_strategy,
max_len=self.mrt_max_len,
cost=self.mrt_cost,
level=self.trg_level
)
else:
batch_loss = self.model.get_loss_for_batch(batch, self.loss)
norm = batch.nseqs if self.norm_type == "batch" else batch.ntokens
norm_batch_loss = batch_loss / norm
# division needed since loss.backward sums the gradients until updated
norm_batch_multiply = norm_batch_loss / self.batch_multiplier
# compute gradients
norm_batch_multiply.backward()
if self.clip_grad_fun is not None:
self.clip_grad_fun(self.model.parameters()) # works in-place
if update:
# make gradient step
self.optimizer.step()
self.optimizer.zero_grad()
self.steps += 1
# increment token counter
self.total_tokens += batch.ntokens
return norm_batch_loss.detach()
def _add_report(self, valid_scores: dict, new_best: bool = False) -> None:
"""
Append a one-line report to validation logging file.
        :param valid_scores: dictionary of validation scores, keyed by metric name
:param new_best: whether this is a new best model
"""
current_lr = -1
# ignores other param groups for now
for param_group in self.optimizer.param_groups:
current_lr = param_group['lr']
if current_lr < self.learning_rate_min:
self.stop = True # why does this happen inside _add_report?
with open(self.valid_report_file, 'a') as opened_file:
labeled_scores = sorted(valid_scores.items())
eval_report = "\t".join("{}: {:.5f}".format(n, v)
for n, v in labeled_scores)
opened_file.write(
"Steps: {}\t{}\tLR: {:.8f}\t{}\n".format(
self.steps, eval_report,
current_lr, "*" if new_best else ""))
def _log_parameters_list(self) -> None:
"""
Write all model parameters (name, shape) to the log.
"""
model_parameters = filter(lambda p: p.requires_grad,
self.model.parameters())
n_params = sum(np.prod(p.size()) for p in model_parameters)
self.logger.info("Total params: %d", n_params)
trainable_params = [n for (n, p) in self.model.named_parameters()
if p.requires_grad]
self.logger.info("Trainable parameters: %s", sorted(trainable_params))
assert trainable_params
def _log_examples(
self,
sources_raw: Dict[str, List[str]],
hypotheses: List[str],
references: List[str],
hypotheses_raw: List[List[str]] = None,
references_raw: List[List[str]] = None) -> None:
"""
        Log the first `self.log_valid_sents` sentences of the given examples.
        :param sources_raw: raw sources (dict mapping field names to lists of strings)
        :param hypotheses: decoded hypotheses (list of strings)
        :param references: decoded references (list of strings)
:param hypotheses_raw: raw hypotheses (list of list of tokens)
:param references_raw: raw references (list of list of tokens)
"""
ix = self.log_valid_sents
assert all(i < len(hypotheses) for i in ix)
sources = {k: postprocess(v, self.src_level)
for k, v in sources_raw.items()}
for i in ix:
self.logger.info("Example #{}".format(i))
for f, rs in sources_raw.items():
self.logger.debug("\t{}: {}".format(f, rs[i]))
if references_raw is not None:
self.logger.debug("\tRaw reference: %s", references_raw[i])
if hypotheses_raw is not None:
self.logger.debug("\tRaw hypothesis: %s", hypotheses_raw[i])
for f, srcs in sources.items():
self.logger.info("\t{}: {}".format(f, srcs[i]))
self.logger.info("\tReference: %s", references[i])
self.logger.info("\tHypothesis: %s", hypotheses[i])
def _store_outputs(self, hypotheses: List[str]) -> None:
"""
Write current validation outputs to file in `self.model_dir.`
:param hypotheses: list of strings
"""
valid_output_file = join(self.model_dir, "{}.hyps".format(self.steps))
with open(valid_output_file, 'w') as f:
for hyp in hypotheses:
f.write("{}\n".format(hyp))
def train(cfg_file: str) -> None:
"""
Main training function. After training, also test on test data if given.
:param cfg_file: path to configuration yaml file
"""
cfg = load_config(cfg_file)
train_cfg = cfg["training"]
data_cfg = cfg["data"]
# set the random seed
set_seed(seed=train_cfg.get("random_seed", 42))
# load the data
data = load_data(data_cfg)
train_data = data["train_data"]
dev_data = data["dev_data"]
test_data = data["test_data"]
vocabs = data["vocabs"]
# build an encoder-decoder model
model = build_model(cfg["model"], vocabs=vocabs)
# for training management, e.g. early stopping and model selection
trainer = TrainManager(model=model, config=cfg)
# store copy of original training config in model dir
shutil.copy2(cfg_file, join(trainer.model_dir, "config.yaml"))
# log all entries of config
log_cfg(cfg, trainer.logger)
log_data_info(
train_data=train_data,
valid_data=dev_data,
test_data=test_data,
vocabs=vocabs,
logging_function=trainer.logger.info)
trainer.logger.info(str(model))
# store the vocabs
model_dir = train_cfg["model_dir"]
for vocab_name, vocab in vocabs.items():
vocab.to_file(join(model_dir, vocab_name + "_vocab.txt"))
# train the model
trainer.train_and_validate(train_data=train_data, valid_data=dev_data)
# predict with the best model on validation and test
# (if test data is available)
ckpt = join(trainer.model_dir, str(trainer.best_ckpt_iteration) + ".ckpt")
output_name = "{:08d}.hyps".format(trainer.best_ckpt_iteration)
output_path = join(trainer.model_dir, output_name)
test(cfg_file, ckpt=ckpt, output_path=output_path, logger=trainer.logger)
if __name__ == "__main__":
parser = argparse.ArgumentParser('Joey-NMT')
parser.add_argument("config", default="configs/default.yaml", type=str,
help="Training configuration file (yaml).")
args = parser.parse_args()
train(cfg_file=args.config)
| [
"torch.save",
"torch.FloatTensor"
] | 1.2 | deep-spin/S7 | c987906b032eaa727c8bcbec53f48befb467e515 |
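# --- Editor's note: minimal sketch (not from the original repo) of the batch_multiplier
# logic in TrainManager._train_batch above: each loss is divided by the multiplier and
# its gradients accumulated, and the optimizer only steps on every batch_multiplier-th
# mini-batch, approximating a larger effective batch size.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
batch_multiplier = 2

for i, x in enumerate(torch.randn(6, 4).split(1), 1):
    loss = model(x).pow(2).sum()
    (loss / batch_multiplier).backward()  # accumulate scaled gradients
    if i % batch_multiplier == 0:         # the `update` flag in the code above
        optimizer.step()
        optimizer.zero_grad()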
1.2 | from typing import Dict
import torch
from torch import Tensor
from torch.distributions.categorical import Categorical
from joeynmt.helpers import tile
def sample_decode(
model,
size: int,
encoder_output,
masks: Dict[str, Tensor],
max_output_length: int,
labels: dict = None):
"""
    Sample ``size`` sequences from the model and return, for each batch
    element, the sampled sequence with the highest log-probability.
    :param model:
    :param size: number of sequences to sample per batch element
:param encoder_output:
:param masks:
:param max_output_length:
:return:
        - best_outputs: shape (batch_size, hyp_len),
        - max_scores: shape (batch_size,)
"""
# init
transformer = model.is_transformer
any_mask = next(iter(masks.values()))
batch_size = any_mask.size(0)
att_vectors = None # not used for Transformer
device = encoder_output.device
masks.pop("trg", None) # mutating one of the inputs is not good
# Recurrent models only: initialize RNN hidden state
if not transformer and model.decoder.bridge_layer is not None:
hidden = model.decoder.bridge_layer(encoder_output.hidden)
else:
hidden = None
# tile encoder states and decoder initial states beam_size times
if hidden is not None:
# layers x batch*k x dec_hidden_size
hidden = tile(hidden, size, dim=1)
# encoder_output: batch*k x src_len x enc_hidden_size
encoder_output.tile(size, dim=0)
masks = {k: tile(v, size, dim=0) for k, v in masks.items()}
# Transformer only: create target mask
masks["trg"] = any_mask.new_ones([1, 1, 1]) if transformer else None
# the structure holding all batch_size * k partial hypotheses
alive_seq = torch.full(
(batch_size * size, 1),
model.bos_index,
dtype=torch.long,
device=device
)
# the structure indicating, for each hypothesis, whether it has
# encountered eos yet (if it has, stop updating the hypothesis
# likelihood)
is_finished = torch.zeros(
batch_size * size, dtype=torch.bool, device=device
)
# for each (batch x size) sequence, there is a log probability
seq_probs = torch.zeros(batch_size * size, device=device)
for step in range(1, max_output_length + 1):
dec_input = alive_seq if transformer else alive_seq[:, -1].view(-1, 1)
# decode a step
probs, hidden, att_scores, att_vectors = model.decode(
trg_input=dec_input,
encoder_output=encoder_output,
masks=masks,
decoder_hidden=hidden,
prev_att_vector=att_vectors,
unroll_steps=1,
labels=labels,
generate="true"
)
# batch*k x trg_vocab
# probs = model.decoder.gen_func(logits[:, -1], dim=-1).squeeze(1)
next_ids = Categorical(probs).sample().unsqueeze(1) # batch*k x 1
next_scores = probs.gather(1, next_ids).squeeze(1) # batch*k
seq_probs = torch.where(
is_finished, seq_probs, seq_probs + next_scores.log()
)
# append latest prediction
# batch_size*k x hyp_len
alive_seq = torch.cat([alive_seq, next_ids], -1)
# update which hypotheses are finished
is_finished = is_finished | next_ids.eq(model.eos_index).squeeze(1)
if is_finished.all():
break
# final_outputs: batch x size x len
final_outputs = alive_seq.view(batch_size, size, -1)
seq_probs = seq_probs.view(batch_size, size)
max_scores, max_ix = seq_probs.max(dim=-1)
outs = []
for b in range(final_outputs.size(0)):
outs.append(final_outputs[b, max_ix[b]])
best_outputs = torch.stack(outs) # hmm, maybe not as good as pad and stack
# print(torch.index_select(final_outputs, 0, max_ix).size())
#print(final_outputs[:, max_ix].size())
return best_outputs, max_scores
| [
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.distributions.categorical.Categorical",
"torch.full"
] | 1.2 | deep-spin/S7 | c987906b032eaa727c8bcbec53f48befb467e515 |
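# --- Editor's note: standalone sketch (not from the original repo) of how sample_decode
# above freezes the log-probability of finished hypotheses: torch.where keeps the old
# score for rows that already emitted EOS and adds the new token's log-probability only
# for rows that are still alive.
import torch

seq_probs = torch.tensor([-1.0, -2.0])
is_finished = torch.tensor([True, False])
next_scores = torch.tensor([0.5, 0.5])  # probabilities of the sampled tokens
seq_probs = torch.where(is_finished, seq_probs, seq_probs + next_scores.log())
print(seq_probs)  # first entry stays -1.0; second becomes -2.0 + log(0.5)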
1.2 | import torch
from joeynmt.model import EncoderOutput
from joeynmt.decoders import TransformerDecoder, TransformerDecoderLayer
from .test_helpers import TensorTestCase
class TestTransformerDecoder(TensorTestCase):
def setUp(self):
self.emb_size = 12
self.num_layers = 3
self.hidden_size = 12
self.ff_size = 24
self.num_heads = 4
self.dropout = 0.
self.seed = 42
def test_transformer_decoder_freeze(self):
torch.manual_seed(self.seed)
encoder = TransformerDecoder(freeze=True)
for n, p in encoder.named_parameters():
self.assertFalse(p.requires_grad)
def test_transformer_decoder_output_size(self):
vocab_size = 11
decoder = TransformerDecoder(
num_layers=self.num_layers, num_heads=self.num_heads,
hidden_size=self.hidden_size, ff_size=self.ff_size,
dropout=self.dropout, vocab_size=vocab_size)
if not hasattr(decoder, "output_size"):
self.fail("Missing output_size property.")
self.assertEqual(decoder.output_size, vocab_size)
def test_transformer_decoder_forward(self):
torch.manual_seed(self.seed)
batch_size = 2
src_time_dim = 4
trg_time_dim = 5
vocab_size = 7
trg_embed = torch.rand(size=(batch_size, trg_time_dim, self.emb_size))
decoder = TransformerDecoder(
num_layers=self.num_layers, num_heads=self.num_heads,
hidden_size=self.hidden_size, ff_size=self.ff_size,
dropout=self.dropout, emb_dropout=self.dropout,
vocab_size=vocab_size)
encoder_output = EncoderOutput(
torch.rand(size=(batch_size, src_time_dim, self.hidden_size)),
None)
for p in decoder.parameters():
torch.nn.init.uniform_(p, -0.5, 0.5)
src_mask = torch.ones(size=(batch_size, 1, src_time_dim)) == 1
trg_mask = torch.ones(size=(batch_size, trg_time_dim, 1)) == 1
output, states, _, _ = decoder(
trg_embed, encoder_output, src_mask, trg_mask)
output_target = torch.Tensor(
[[[ 0.1765, 0.4578, 0.2345, -0.5303, 0.3862, 0.0964, 0.6882],
[ 0.3363, 0.3907, 0.2210, -0.5414, 0.3770, 0.0748, 0.7344],
[ 0.3275, 0.3729, 0.2797, -0.3519, 0.3341, 0.1605, 0.5403],
[ 0.3081, 0.4513, 0.1900, -0.3443, 0.3072, 0.0570, 0.6652],
[ 0.3253, 0.4315, 0.1227, -0.3371, 0.3339, 0.1129, 0.6331]],
[[ 0.3235, 0.4836, 0.2337, -0.4019, 0.2831, -0.0260, 0.7013],
[ 0.2800, 0.5662, 0.0469, -0.4156, 0.4246, -0.1121, 0.8110],
[ 0.2968, 0.4777, 0.0652, -0.2706, 0.3146, 0.0732, 0.5362],
[ 0.3108, 0.4910, 0.0774, -0.2341, 0.2873, 0.0404, 0.5909],
[ 0.2338, 0.4371, 0.1350, -0.1292, 0.0673, 0.1034, 0.5356]]]
)
self.assertEqual(output_target.shape, output.shape)
self.assertTensorAlmostEqual(output_target, output)
greedy_predictions = output.argmax(-1)
expect_predictions = output_target.argmax(-1)
self.assertTensorEqual(expect_predictions, greedy_predictions)
states_target = torch.Tensor(
[[[ 8.3742e-01, -1.3161e-01, 2.1876e-01, -1.3920e-01, -9.1572e-01,
2.3006e-01, 3.8328e-01, -1.6271e-01, 3.7370e-01, -1.2110e-01,
-4.7549e-01, -4.0622e-01],
[ 8.3609e-01, -2.9161e-02, 2.0583e-01, -1.3571e-01, -8.0510e-01,
2.7630e-01, 4.8219e-01, -1.8863e-01, 1.1977e-01, -2.0179e-01,
-4.4314e-01, -4.1228e-01],
[ 8.5478e-01, 1.1368e-01, 2.0400e-01, -1.3059e-01, -8.1042e-01,
1.6369e-01, 5.4244e-01, -2.9103e-01, 3.9919e-01, -3.3826e-01,
-4.5423e-01, -4.2516e-01],
[ 9.0388e-01, 1.1853e-01, 1.9927e-01, -1.1675e-01, -7.7208e-01,
2.0686e-01, 4.6024e-01, -9.1610e-02, 3.9778e-01, -2.6214e-01,
-4.7688e-01, -4.0807e-01],
[ 8.9476e-01, 1.3646e-01, 2.0298e-01, -1.0910e-01, -8.2137e-01,
2.8025e-01, 4.2538e-01, -1.1852e-01, 4.1497e-01, -3.7422e-01,
-4.9212e-01, -3.9790e-01]],
[[ 8.8745e-01, -2.5798e-02, 2.1483e-01, -1.8219e-01, -6.4821e-01,
2.6279e-01, 3.9598e-01, -1.0423e-01, 3.0726e-01, -1.1315e-01,
-4.7201e-01, -3.6979e-01],
[ 7.5528e-01, 6.8919e-02, 2.2486e-01, -1.6395e-01, -7.9692e-01,
3.7830e-01, 4.9367e-01, 2.4355e-02, 2.6674e-01, -1.1740e-01,
-4.4945e-01, -3.6367e-01],
[ 8.3467e-01, 1.7779e-01, 1.9504e-01, -1.6034e-01, -8.2783e-01,
3.2627e-01, 5.0045e-01, -1.0181e-01, 4.4797e-01, -4.8046e-01,
-3.7264e-01, -3.7392e-01],
[ 8.4359e-01, 2.2699e-01, 1.9721e-01, -1.5768e-01, -7.5897e-01,
3.3738e-01, 4.5559e-01, -1.0258e-01, 4.5782e-01, -3.8058e-01,
-3.9275e-01, -3.8412e-01],
[ 9.6349e-01, 1.6264e-01, 1.8207e-01, -1.6910e-01, -5.9304e-01,
1.4468e-01, 2.4968e-01, 6.4794e-04, 5.4930e-01, -3.8420e-01,
-4.2137e-01, -3.8016e-01]]]
)
self.assertEqual(states_target.shape, states.shape)
self.assertTensorAlmostEqual(states_target, states)
def test_transformer_decoder_layers(self):
torch.manual_seed(self.seed)
batch_size = 2
src_time_dim = 4
trg_time_dim = 5
vocab_size = 7
decoder = TransformerDecoder(
num_layers=self.num_layers, num_heads=self.num_heads,
hidden_size=self.hidden_size, ff_size=self.ff_size,
dropout=self.dropout, vocab_size=vocab_size)
self.assertEqual(len(decoder.layers), self.num_layers)
for layer in decoder.layers:
self.assertTrue(isinstance(layer, TransformerDecoderLayer))
self.assertTrue(hasattr(layer, "src_trg_att"))
self.assertTrue(hasattr(layer, "trg_trg_att"))
self.assertTrue(hasattr(layer, "feed_forward"))
self.assertEqual(layer.size, self.hidden_size)
self.assertEqual(
layer.feed_forward.pwff_layer[0].in_features, self.hidden_size)
self.assertEqual(
layer.feed_forward.pwff_layer[0].out_features, self.ff_size)
| [
"torch.rand",
"torch.ones",
"torch.manual_seed",
"torch.nn.init.uniform_",
"torch.Tensor"
] | 1.2 | deep-spin/S7 | c987906b032eaa727c8bcbec53f48befb467e515 |
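# --- Editor's note: the hard-coded expected tensors in the test above are only
# reproducible because each test re-seeds the RNG before building the decoder and its
# inputs; the same pattern in isolation (not from the original repo):
import torch

torch.manual_seed(42)
a = torch.rand(3)
torch.manual_seed(42)
b = torch.rand(3)
assert torch.equal(a, b)  # identical draws after re-seeding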
1.7 | # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
import argparse
import json
import math
import os
import pickle
from collections import Counter, defaultdict
from copy import deepcopy
from functools import partial
from multiprocessing import Pool
from pathlib import Path
from typing import Dict, List, Tuple
import sys
PACKAGE_PARENT = ".."
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import torch
from tqdm import tqdm
from utils.boxes import box_iou_helper, combine_boxes, get_boxes_equiv, obj_to_box, region_to_box, xyxy_to_xywh
from utils.dump import Annotation, Datapoint
from utils.spans import (
PreprocessError,
consolidate_spans,
get_canonical_spans,
span_intersect_spanlist,
spanlist_intersect_spanlist,
)
from utils.text import get_root_and_nouns, normalize_sentence, normalize_whitespace, simplify_punctuation
from utils.unionfind import UnionFind
def parse_args():
parser = argparse.ArgumentParser("Visual Genome conversion script")
parser.add_argument(
"--dataset_path",
required=True,
type=str,
help="Path to the VG dataset. Should contain region graphs",
)
parser.add_argument(
"--out_path",
default=None,
type=str,
help="Path where to export the resulting dataset.",
)
parser.add_argument(
"--nb_process",
default=1,
type=str,
help="Number of concurrent processes to use to dump the data",
)
return parser.parse_args()
def preprocess_region(region):
filtered_region = {
"caption": simplify_punctuation(normalize_whitespace(region["phrase"])),
"original_image_id": region["image_id"],
"original_region_id": region["region_id"],
"boxes": [],
"tokens_positive": [],
"found_objects": False,
}
if len(filtered_region["caption"]) < 3:
raise PreprocessError("caption too short, skipping" + filtered_region["caption"])
_, _, root_spans, negative_spans = get_root_and_nouns(filtered_region["caption"].lower(), False)
# Filter objects that have multiple synsets, they are likely to be spurious
obj_synsets = set([o["synsets"][0] for o in region["objects"] if len(o["synsets"]) == 1])
synsets_count = Counter([s["synset_name"] for s in region["synsets"]])
# Filter synsets that occur multiple times, since we don't have mapping to objects
all_synsets = set([s["synset_name"] for s in region["synsets"] if synsets_count[s["synset_name"]] == 1])
authorized_synsets = obj_synsets.intersection(all_synsets)
syn2span: Dict[str, Tuple[int, int]] = {
s["synset_name"]: (s["entity_idx_start"], s["entity_idx_end"])
for s in region["synsets"]
if s["synset_name"] in authorized_synsets
}
synlist, spanlist = [], []
for k, s in syn2span.items():
synlist.append(k)
spanlist.append([s])
# the spans positions may have been altered by the whitespace removal, so we recompute here
spanlist, new_caption = get_canonical_spans(spanlist, region["phrase"], whitespace_only=True)
if new_caption.lower().strip() != filtered_region["caption"].lower().strip():
raise PreprocessError(f"Inconsistent whitespace removal: '{new_caption}' vs '{filtered_region['caption']}'")
assert len(synlist) == len(spanlist)
syn2span = {k: v[0] for k, v in zip(synlist, spanlist)}
root_objs = []
other_objs: Dict[Tuple[int, int], List[List[int]]] = {}
for obj in region["objects"]:
if len(obj["synsets"]) == 1 and obj["synsets"][0] in authorized_synsets:
cur_span = syn2span[obj["synsets"][0]]
if span_intersect_spanlist(cur_span, root_spans):
root_objs.append(obj_to_box(obj))
filtered_region["found_objects"] = True
else:
if cur_span not in other_objs:
other_objs[cur_span] = []
negative_spans.append(cur_span)
other_objs[cur_span].append(obj_to_box(obj))
filtered_region["found_objects"] = True
if len(root_objs) == 0:
# If we don't have a box for the root of the sentence, we use the box of the region itself.
root_objs.append(region_to_box(region))
dedup_root_objs = combine_boxes(root_objs)
filtered_region["boxes"] += dedup_root_objs
root_spans = consolidate_spans(root_spans, filtered_region["caption"])
filtered_region["tokens_positive"] += [root_spans for _ in range(len(dedup_root_objs))]
for span, objs in other_objs.items():
dedup_objs = combine_boxes(objs)
filtered_region["boxes"] += dedup_objs
cur_spans = consolidate_spans([span], filtered_region["caption"])
filtered_region["tokens_positive"] += [cur_spans for _ in range(len(dedup_objs))]
filtered_region["tokens_negative"] = consolidate_spans(negative_spans, filtered_region["caption"])
return filtered_region
def deduplicate_regions(regions, iou_threshold=0.5):
"""This functions accepts pre-processed region descriptions for a given image, and removes regions that are redundant.
Two regions are deemed redundant if 1) the text is closely matching 2) the IOU between region boxes is > iou_threshold
    A cleaned list of regions is returned.
"""
def helper_merge(regions):
if len(regions) <= 1:
return regions
uf = UnionFind(len(regions))
for r in regions:
spans, txt2 = get_canonical_spans(r["tokens_positive"], r["caption"])
if txt != txt2:
raise PreprocessError(f"inconsistent canonicalization fct. Mismatch: '{txt}' and '{txt2}'")
r["cano_tokens"] = spans
for r1 in range(len(regions)):
for r2 in range(r1 + 1, len(regions)):
compatible = True
assert len(regions[r1]["boxes"]) == len(regions[r1]["cano_tokens"])
assert len(regions[r2]["boxes"]) == len(regions[r2]["cano_tokens"])
ious = box_iou_helper(regions[r1]["boxes"], regions[r2]["boxes"])
for b1 in range(len(regions[r1]["cano_tokens"])):
for b2 in range(len(regions[r2]["cano_tokens"])):
if (len(regions[r1]["cano_tokens"][b1]) == 0 or len(regions[r2]["cano_tokens"][b2]) == 0) or (
spanlist_intersect_spanlist(regions[r1]["cano_tokens"][b1], regions[r2]["cano_tokens"][b2])
and ious[b1][b2] < iou_threshold
):
compatible = False
break
if not compatible:
break
if compatible:
uf.unite(r1, r2)
compo2regions = defaultdict(list)
for i, r in enumerate(regions):
compo2regions[uf.find(i)].append(r)
final_regions = []
for reg_list in compo2regions.values():
if len(reg_list) == 1:
final_regions.append(reg_list[0])
else:
# We pick as representative of this cluster the region with the most boxes
sorted_regions = sorted([(len(r["boxes"]), i) for i, r in enumerate(reg_list)], reverse=True)
reg_ids = [sr[1] for sr in sorted_regions]
# We need to put the boxes and token spans in buckets
cano_spans_buckets = []
orig_spans_buckets = []
boxes_buckets = []
for idx in reg_ids:
for b in range(len(reg_list[idx]["boxes"])):
# find the bucket
bucket = -1
for j in range(len(cano_spans_buckets)):
if spanlist_intersect_spanlist(reg_list[idx]["cano_tokens"][b], cano_spans_buckets[j]):
bucket = j
break
if bucket == -1:
# bucket not found, creating one.
if idx != reg_ids[0]:
# This shouldn't happen. But if it does, we give up on the merging
return regions
assert idx == reg_ids[0], (
"TODO: if this triggers, it means another regions has token spans than aren't covered by the main region."
+ "We need to create a new token span, which involve finding the span in the original sentencen of the main region. Don't forget to update the negative tokens"
)
bucket = len(orig_spans_buckets)
orig_spans_buckets.append(reg_list[idx]["tokens_positive"][b])
cano_spans_buckets.append(reg_list[idx]["cano_tokens"][b])
boxes_buckets.append([reg_list[idx]["boxes"][b]])
else:
boxes_buckets[bucket].append(reg_list[idx]["boxes"][b])
assert len(orig_spans_buckets) == len(boxes_buckets)
merged_region = deepcopy(reg_list[reg_ids[0]])
merged_region["tokens_positive"] = []
merged_region["boxes"] = []
for i in range(len(boxes_buckets)):
dedup_objs = combine_boxes(boxes_buckets[i], iou_threshold=0.5)
merged_region["boxes"] += dedup_objs
merged_region["tokens_positive"] += [orig_spans_buckets[i] for _ in range(len(dedup_objs))]
final_regions.append(merged_region)
for r in final_regions:
del r["cano_tokens"]
return final_regions
txt2region = defaultdict(list)
for r in regions:
txt2region[normalize_sentence(r["caption"])].append(r)
stupid_sentence_set = set(["wall", "side", "building"])
final_regions = []
for txt, regions in txt2region.items():
# Edge case, we remove the sentences like "the wall on the side of the building" which are uninformative and have spurious boxes
if "wall" in txt and set(txt.strip().split(" ")).issubset(stupid_sentence_set):
continue
if len(regions) == 1:
final_regions.append(deepcopy(regions[0]))
else:
# print(txt)
regions_with_boxes = [r for r in regions if r["found_objects"]]
all_boxes = sum([r["boxes"] for r in regions_with_boxes], [])
# print("regions with boxes", len(regions_with_boxes))
regions_without_boxes = []
for r in regions:
if not r["found_objects"]:
# we consider than one of the region with boxes will be better suited and drop this one
# if there is a positive iou. Otherwise, we have to keep it
if len(regions_with_boxes) == 0 or box_iou_helper(all_boxes, r["boxes"]).max().item() < 0.1:
regions_without_boxes.append(r)
# print("regions without boxes", len(regions_without_boxes))
try:
new_regions_with_boxes = helper_merge(regions_with_boxes)
except PreprocessError as e:
print("skipping", e)
# Ouch, hit a cornercase, we give up on the merge
new_regions_with_boxes = regions_with_boxes
try:
new_regions_without_boxes = helper_merge(regions_without_boxes)
except PreprocessError as e:
print("skipping", e)
# Ouch, hit a cornercase, we give up on the merge
new_regions_without_boxes = regions_without_boxes
# now collapse into one big region. We do it only when the captions are exactly matching, otherwise it's a nightmare to recompute spans
capt2region = defaultdict(list)
for r in new_regions_with_boxes + new_regions_without_boxes:
capt2region[r["caption"]].append(r)
for capt, reg_list in capt2region.items():
all_boxes = sum([r["boxes"] for r in reg_list], [])
all_tokens = sum([r["tokens_positive"] for r in reg_list], [])
compo2boxes, compo2id = get_boxes_equiv(all_boxes, iou_threshold=0.75)
final_boxes = []
final_tokens = []
if compo2boxes is not None:
for compo in compo2boxes.keys():
box_list = compo2boxes[compo]
id_list = compo2id[compo]
final_boxes.append(xyxy_to_xywh(torch.stack(box_list, 0).mean(0)).tolist())
final_tokens.append(consolidate_spans(sum([all_tokens[i] for i in id_list], []), capt))
else:
final_boxes = all_boxes
final_tokens = all_tokens
merged_region = {
"caption": capt,
"original_image_id": reg_list[0]["original_image_id"],
"original_region_id": reg_list[0]["original_region_id"],
"boxes": final_boxes,
"tokens_positive": final_tokens,
"tokens_negative": consolidate_spans(sum([r["tokens_negative"] for r in reg_list], []), capt),
"found_objects": False,
}
final_regions.append(merged_region)
return final_regions
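# A minimal, illustrative sketch of the box bookkeeping used in _get_all_datapoints
# below, assuming boxes are stored as [x, y, w, h] (as the conversion there suggests):
# the area is w * h, and the GIoU-friendly [x1, y1, x2, y2] form is obtained by adding
# the box size to its top-left corner.
def _example_xywh_to_xyxy_and_area(boxes_xywh):
    boxes = torch.as_tensor(boxes_xywh, dtype=torch.float)
    areas = boxes[:, -1] * boxes[:, -2]          # h * w
    xyxy = boxes.clone()
    xyxy[:, 2:] += xyxy[:, :2]                   # (x, y, w, h) -> (x1, y1, x2, y2)
    return xyxy.tolist(), areas.tolist()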
def _get_all_datapoints(output_path: Path, img_list, proc_id: int):
# image2ann_map = defaultdict(lambda: defaultdict(list))
print(f"process {proc_id} got job queue of", len(img_list))
all_datapoints: List[Datapoint] = []
for i, data in enumerate(tqdm(img_list)):
# print(f"status {i}/{len(img_list)}")
all_regions = []
for r in data["regions"]:
try:
all_regions.append(preprocess_region(r))
except PreprocessError as e:
print("Dropping region, preprocess failed", e)
all_regions = deduplicate_regions(all_regions)
# all_regions = deduplicate_regions([preprocess_region(r) for r in data["regions"]])
for region in all_regions:
cur_datapoint = Datapoint(
image_id=data["image_id"],
dataset_name="VG",
tokens_negative=region["tokens_negative"],
original_id=region["original_region_id"],
caption=region["caption"],
annotations=[],
)
assert len(region["boxes"]) == len(region["tokens_positive"])
converted_bbox = torch.as_tensor(region["boxes"], dtype=torch.float)
areas = converted_bbox[:, -1] * converted_bbox[:, -2]
# Convert to (x,y,x,y) format
converted_bbox[:, 2:] += converted_bbox[:, :2]
for i in range(len(region["boxes"])):
cur_ann = Annotation(
area=float(areas[i]),
iscrowd=0,
category_id=1,
bbox=region["boxes"][i],
giou_friendly_bbox=converted_bbox[i].tolist(),
tokens_positive=region["tokens_positive"][i],
)
cur_datapoint.annotations.append(cur_ann)
all_datapoints.append(cur_datapoint)
print(f"Process {proc_id} dumping...")
pickle.dump(all_datapoints, open(output_path / f"vg_train_dump_{proc_id}.pkl", "wb"))
print(f"Process {proc_id} done.")
del all_datapoints
return None
# return image2ann_map
def chunk_list(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
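# Tiny sanity sketch for chunk_list (illustrative only, relying on nothing beyond the
# generator above): a 7-element list split into chunks of 3 gives two full chunks and
# one remainder, which is how get_all_datapoints below spreads work across processes.
def _example_chunk_list():
    chunks = list(chunk_list(list(range(7)), 3))
    assert chunks == [[0, 1, 2], [3, 4, 5], [6]]
    return chunks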
def get_all_datapoints(dataset_path: Path, output_path: Path, nb_proc: int):
print("loading region graphs....")
with open(dataset_path / "region_graphs.json", "r") as f:
VG_region_graph = json.load(f)
print("loading success!")
# return _get_image2ann_mapping(VG_region_graph)
chunks = list(chunk_list(VG_region_graph, math.ceil(len(VG_region_graph) / (18 * nb_proc))))
# sub_part = sum(chunks[:3], [])
# chunks = list(chunk_list(sub_part, math.ceil(len(sub_part) / nb_proc)))
proc_id = list(range(len(chunks)))
# assert len(chunks) == nb_proc
with Pool(nb_proc) as p:
p.starmap(partial(_get_all_datapoints, output_path), zip(chunks, proc_id))
return None
def main(args):
vg_path = Path(args.dataset_path)
output_path = Path(args.out_path) if args.out_path is not None else vg_path
os.makedirs(str(output_path), exist_ok=True)
get_all_datapoints(vg_path, output_path, int(args.nb_process))
if __name__ == "__main__":
main(parse_args())
| [
"torch.as_tensor",
"torch.stack"
] | 1.7.0 | bonejay/mdetr | 38c6d7c26d6d493f7bf6772ba65a72b493573d90 |
1.6 | import argparse
import time
import torch
from hivemind.proto.runtime_pb2 import CompressionType
from hivemind.utils.compression import serialize_torch_tensor, deserialize_torch_tensor
def benchmark_compression(tensor: torch.Tensor, compression_type: CompressionType) -> float:
t = time.time()
deserialize_torch_tensor(serialize_torch_tensor(tensor, compression_type))
return time.time() - t
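# A small hedged helper sketch: average the round-trip time measured by
# benchmark_compression above over a few iterations, mirroring what the
# __main__ block below does for every compression type.
def _average_compression_time(tensor: torch.Tensor, compression_type: CompressionType,
                              num_iters: int = 5) -> float:
    total = 0.0
    for _ in range(num_iters):
        total += benchmark_compression(tensor, compression_type)
    return total / num_iters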
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--size', type=int, default=10000000, required=False)
parser.add_argument('--seed', type=int, default=7348, required=False)
parser.add_argument('--num_iters', type=int, default=30, required=False)
args = parser.parse_args()
torch.manual_seed(args.seed)
X = torch.randn(args.size)
for name, compression_type in CompressionType.items():
tm = 0
for i in range(args.num_iters):
tm += benchmark_compression(X, compression_type)
tm /= args.num_iters
print(f"Compression type: {name}, time: {tm}")
| [
"torch.manual_seed",
"torch.randn"
] | 1.6.0 | Vsevolod-pl/hivemind | 0300cfd91adeb14d91d9659a98221628f9b775b9 |
1.6 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
try:
from torchvision.models.utils import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Inception weights ported to Pytorch from
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' # noqa: E501
class InceptionV3(nn.Module):
"""Pretrained InceptionV3 network returning feature maps"""
# Index of default block of inception to return,
# corresponds to output of final average pooling
DEFAULT_BLOCK_INDEX = 3
# Maps feature dimensionality to their output blocks indices
BLOCK_INDEX_BY_DIM = {
64: 0, # First max pooling features
192: 1, # Second max pooling features
768: 2, # Pre-aux classifier features
2048: 3 # Final average pooling features
}
def __init__(self,
output_blocks=(DEFAULT_BLOCK_INDEX,),
resize_input=True,
normalize_input=True,
requires_grad=False,
use_fid_inception=True):
"""Build pretrained InceptionV3
Parameters
----------
output_blocks : list of int
Indices of blocks to return features of. Possible values are:
- 0: corresponds to output of first max pooling
- 1: corresponds to output of second max pooling
- 2: corresponds to output which is fed to aux classifier
- 3: corresponds to output of final average pooling
resize_input : bool
If true, bilinearly resizes input to width and height 299 before
feeding input to model. As the network without fully connected
layers is fully convolutional, it should be able to handle inputs
of arbitrary size, so resizing might not be strictly needed
normalize_input : bool
If true, scales the input from range (0, 1) to the range the
pretrained Inception network expects, namely (-1, 1)
requires_grad : bool
If true, parameters of the model require gradients. Possibly useful
for finetuning the network
use_fid_inception : bool
If true, uses the pretrained Inception model used in Tensorflow's
FID implementation. If false, uses the pretrained Inception model
available in torchvision. The FID Inception model has different
weights and a slightly different structure from torchvision's
Inception model. If you want to compute FID scores, you are
strongly advised to set this parameter to true to get comparable
results.
"""
super(InceptionV3, self).__init__()
self.resize_input = resize_input
self.normalize_input = normalize_input
self.output_blocks = sorted(output_blocks)
self.last_needed_block = max(output_blocks)
assert self.last_needed_block <= 3, \
'Last possible output block index is 3'
self.blocks = nn.ModuleList()
if use_fid_inception:
inception = fid_inception_v3()
else:
inception = _inception_v3(pretrained=True)
# Block 0: input to maxpool1
block0 = [
inception.Conv2d_1a_3x3,
inception.Conv2d_2a_3x3,
inception.Conv2d_2b_3x3,
nn.MaxPool2d(kernel_size=3, stride=2)
]
self.blocks.append(nn.Sequential(*block0))
# Block 1: maxpool1 to maxpool2
if self.last_needed_block >= 1:
block1 = [
inception.Conv2d_3b_1x1,
inception.Conv2d_4a_3x3,
nn.MaxPool2d(kernel_size=3, stride=2)
]
self.blocks.append(nn.Sequential(*block1))
# Block 2: maxpool2 to aux classifier
if self.last_needed_block >= 2:
block2 = [
inception.Mixed_5b,
inception.Mixed_5c,
inception.Mixed_5d,
inception.Mixed_6a,
inception.Mixed_6b,
inception.Mixed_6c,
inception.Mixed_6d,
inception.Mixed_6e,
]
self.blocks.append(nn.Sequential(*block2))
# Block 3: aux classifier to final avgpool
if self.last_needed_block >= 3:
block3 = [
inception.Mixed_7a,
inception.Mixed_7b,
inception.Mixed_7c,
nn.AdaptiveAvgPool2d(output_size=(1, 1))
]
self.blocks.append(nn.Sequential(*block3))
for param in self.parameters():
param.requires_grad = requires_grad
def forward(self, inp):
"""Get Inception feature maps
Parameters
----------
inp : torch.autograd.Variable
Input tensor of shape Bx3xHxW. Values are expected to be in
range (0, 1)
Returns
-------
List of torch.autograd.Variable, corresponding to the selected output
block, sorted ascending by index
"""
outp = []
x = inp
if self.resize_input:
x = F.interpolate(x,
size=(299, 299),
mode='bilinear',
align_corners=False)
if self.normalize_input:
x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
for idx, block in enumerate(self.blocks):
x = block(x)
if idx in self.output_blocks:
outp.append(x)
if idx == self.last_needed_block:
break
return outp
def _inception_v3(*args, **kwargs):
"""Wraps `torchvision.models.inception_v3`
Skips default weight initialization if supported by torchvision version.
See https://github.com/mseitzer/pytorch-fid/issues/28.
"""
try:
version = tuple(map(int, torchvision.__version__.split('.')[:2]))
except ValueError:
# Just a caution against weird version strings
version = (0,)
if version >= (0, 6):
kwargs['init_weights'] = False
return torchvision.models.inception_v3(*args, **kwargs)
def fid_inception_v3():
"""Build pretrained Inception model for FID computation
The Inception model for FID computation uses a different set of weights
and has a slightly different structure than torchvision's Inception.
This method first constructs torchvision's Inception and then patches the
necessary parts that are different in the FID Inception model.
"""
inception = _inception_v3(num_classes=1008,
aux_logits=False,
pretrained=False)
inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
inception.Mixed_7b = FIDInceptionE_1(1280)
inception.Mixed_7c = FIDInceptionE_2(2048)
state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
inception.load_state_dict(state_dict)
return inception
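# A minimal usage sketch (illustrative only), assuming a batch of RGB images already
# scaled to [0, 1] and network access on first use to fetch the FID weights: build the
# wrapper above and pull the 2048-d final-average-pool features that FID computations
# typically consume.
def _example_extract_pool_features(images):
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    model = InceptionV3(output_blocks=[block_idx]).eval()
    with torch.no_grad():
        features = model(images)[0]          # shape (B, 2048, 1, 1)
    return features.squeeze(-1).squeeze(-1)  # shape (B, 2048)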
class FIDInceptionA(torchvision.models.inception.InceptionA):
"""InceptionA block patched for FID computation"""
def __init__(self, in_channels, pool_features):
super(FIDInceptionA, self).__init__(in_channels, pool_features)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
# Patch: Tensorflow's average pool does not use the padded zeros in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionC(torchvision.models.inception.InceptionC):
"""InceptionC block patched for FID computation"""
def __init__(self, in_channels, channels_7x7):
super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
# Patch: Tensorflow's average pool does not use the padded zeros in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_1(torchvision.models.inception.InceptionE):
"""First InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_1, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: Tensorflow's average pool does not use the padded zeros in
# its average calculation
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
count_include_pad=False)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class FIDInceptionE_2(torchvision.models.inception.InceptionE):
"""Second InceptionE block patched for FID computation"""
def __init__(self, in_channels):
super(FIDInceptionE_2, self).__init__(in_channels)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
# Patch: The FID Inception model uses max pooling instead of average
# pooling. This is likely an error in this specific Inception
# implementation, as other Inception models use average pooling here
# (which matches the description in the paper).
branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1) | [
"torch.cat",
"torch.nn.functional.avg_pool2d",
"torch.nn.ModuleList",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.functional.interpolate",
"torch.utils.model_zoo.load_url",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.max_pool2d"
] | 1.6.0 | man-sean/pscgan | be87e519cf789dc28b052afcea6c135a74cdbaaa |
1.5 | import torch
import cv2
import numpy as np
from .models.with_mobilenet import PoseEstimationWithMobileNet
from .modules.keypoints import extract_keypoints, group_keypoints
from .modules.load_state import load_state
from .modules.pose import Pose
from . import demo
def get_rect(net, images, height_size=512):
stride = 8
upsample_ratio = 4
num_keypoints = Pose.num_kpts
previous_poses = []
delay = 33
for image in images:
rect_path = image.replace('.%s' % (image.split('.')[-1]), '_rect.txt')
img = cv2.imread(image, cv2.IMREAD_COLOR)
orig_img = img.copy()
heatmaps, pafs, scale, pad = demo.infer_fast(net, img, height_size, stride, upsample_ratio, cpu=False)
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(num_keypoints): # 19th for bg
total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx], all_keypoints_by_type,
total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, pafs, demo=True)
for kpt_id in range(all_keypoints.shape[0]):
all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride / upsample_ratio - pad[1]) / scale
all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride / upsample_ratio - pad[0]) / scale
current_poses = []
rects = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
valid_keypoints = []
for kpt_id in range(num_keypoints):
if pose_entries[n][kpt_id] != -1.0: # keypoint was found
pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
valid_keypoints.append([pose_keypoints[kpt_id, 0], pose_keypoints[kpt_id, 1]])
valid_keypoints = np.array(valid_keypoints)
if pose_entries[n][10] != -1.0 or pose_entries[n][13] != -1.0:
pmin = valid_keypoints.min(0)
pmax = valid_keypoints.max(0)
center = (0.5 * (pmax[:2] + pmin[:2])).astype(np.int)
radius = int(0.65 * max(pmax[0] - pmin[0], pmax[1] - pmin[1]))
elif pose_entries[n][10] == -1.0 and pose_entries[n][13] == -1.0 and pose_entries[n][8] != -1.0 and \
pose_entries[n][11] != -1.0:
# if leg is missing, use pelvis to get cropping
center = (0.5 * (pose_keypoints[8] + pose_keypoints[11])).astype(np.int)
radius = int(1.45 * np.sqrt(((center[None, :] - valid_keypoints) ** 2).sum(1)).max(0))
center[1] += int(0.05 * radius)
else:
center = np.array([img.shape[1] // 2, img.shape[0] // 2])
radius = max(img.shape[1] // 2, img.shape[0] // 2)
x1 = center[0] - radius
y1 = center[1] - radius
rects.append([x1, y1, 2 * radius, 2 * radius])
np.savetxt(rect_path, np.array(rects), fmt='%d')
def get_pose_model():
net = PoseEstimationWithMobileNet()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
checkpoint = torch.load('lightweight_human_pose_estimation_pytorch/checkpoint_iter_370000.pth', map_location=device)
load_state(net, checkpoint)
return net
def get_pose(net, image_path):
get_rect(net.cuda(), [image_path], 512)
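# A minimal usage sketch (illustrative only). Assumptions: the checkpoint file
# referenced in get_pose_model exists, a CUDA device is available, and
# "example.jpg" is a hypothetical image path. get_pose writes the crop rectangle
# to a *_rect.txt file next to the image.
def _example_single_image(image_path="example.jpg"):
    net = get_pose_model()
    get_pose(net, image_path)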
| [
"torch.cuda.is_available",
"torch.load"
] | 1.5.1 | psi1104/pifuhd | 32be6642d8ee198f6186ec7ab82f329d95a9f275 |
1.7 | import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from timm.models.vision_transformer import _cfg
__all__ = [
'pvt_tiny', 'pvt_small', 'pvt_medium', 'pvt_large',
'pvt_small_sk2ffn'
]
class SK2(nn.Module):
def __init__(self, dim, reduce_ratio=4.):
super().__init__()
hidden_dim = max(int(dim // reduce_ratio), 32)
print('hidden_dim = ', hidden_dim)
self.mlp = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, dim),
nn.BatchNorm1d(dim),
)
# Mlp(in_features=dim, hidden_features=hidden_dim)
self.alpha = nn.Parameter(torch.zeros(1, 1))
def forward(self, x0, x1):
sig = self.alpha.sigmoid()
attn = (1.0 - sig) * x0.mean(dim=1, keepdim=False) + sig * x1.mean(dim=1, keepdim=False)
attn = self.mlp(attn).sigmoid().unsqueeze(1)
x = x0 + attn * (x1 - x0)
return x
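# A minimal shape sketch (illustrative only), assuming token tensors of shape (B, N, C)
# as used by the transformer blocks below: SK2 builds a per-channel gate from the
# token-averaged features of both branches and blends x0 toward x1, keeping the shape.
def _example_sk2_shapes():
    sk2 = SK2(dim=64).eval()
    x0 = torch.randn(2, 196, 64)
    x1 = torch.randn(2, 196, 64)
    with torch.no_grad():
        out = sk2(x0, x1)
    assert out.shape == (2, 196, 64)
    return out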
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class SK2Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.sk2 = SK2(hidden_features)
def forward(self, x):
y = self.fc1(x)
y = self.act(y)
x = self.sk2(x, y)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Block_sk2mlp(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = SK2Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
# x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))
# x = self.sk2(x, self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x)))
# x = self.sk2ln(x, self.mlp(self.norm2(x)))
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
def forward(self, x, H, W):
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if self.sr_ratio > 1:
x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
x_ = self.norm(x_)
kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
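# A minimal shape sketch for the spatial-reduction attention above (illustrative only),
# assuming a 56x56 token grid: with sr_ratio=8 the keys/values are computed on a reduced
# 7x7 grid, while the queries, and therefore the output, keep the full resolution.
def _example_sr_attention_shapes():
    attn = Attention(dim=64, num_heads=1, sr_ratio=8).eval()
    x = torch.randn(1, 56 * 56, 64)
    with torch.no_grad():
        y = attn(x, 56, 56)
    assert y.shape == x.shape
    return y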
class SK2LN(nn.Module):
def __init__(self, dim, reduce_ratio=4.):
super().__init__()
hidden_dim = max(int(dim // reduce_ratio), 32)
print('hidden_dim = ', hidden_dim)
self.mlp = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.LayerNorm(hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, dim),
nn.LayerNorm(dim),
#nn.BatchNorm1d(dim),
)
# Mlp(in_features=dim, hidden_features=hidden_dim)
self.alpha = nn.Parameter(torch.zeros(1, 1))
def forward(self, x0, x1):
sig = self.alpha.sigmoid()
attn = (1.0 - sig) * x0.mean(dim=1, keepdim=False) + sig * x1.mean(dim=1, keepdim=False)
attn = self.mlp(attn).sigmoid().unsqueeze(1)
x = x0 + attn * (x1 - x0)
return x
class Block_sk2lnffn(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.sk2ln = SK2LN(dim)
print('using ffn sk2ln')
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
# x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))
# x = self.sk2(x, self.attn(self.norm1(x), H, W))
# x = x + self.drop_path(self.mlp(self.norm2(x)))
x = self.sk2ln(x, self.mlp(self.norm2(x)))
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
#self.sk2 = SK2(dim)
#print('using ffn sk2')
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
# x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))
# x = self.sk2(x, self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x)))
# x = self.sk2(x, self.mlp(self.norm2(x)))
return x
class Block_sk2ffn(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.sk2 = SK2(dim)
print('using ffn sk2')
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
# x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))
# x = self.sk2(x, self.attn(self.norm1(x), H, W))
# x = x + self.drop_path(self.mlp(self.norm2(x)))
x = self.sk2(x, self.mlp(self.norm2(x)))
return x
class Block_sk2ffnx(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.sk2 = SK2(dim)
print('using ffn sk2 x, x+mlp(x)')
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
# x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))
# x = self.sk2(x, self.attn(self.norm1(x), H, W))
# x = x + self.drop_path(self.mlp(self.norm2(x)))
x = self.sk2(x, x + self.mlp(self.norm2(x)))
return x
class Block_sk2(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.sk2 = SK2(dim)
print('using sk2')
def forward(self, x, H, W):
# x = x + self.drop_path(self.attn(self.norm1(x), H, W))
# x = self.sk2(x, self.drop_path(self.attn(self.norm1(x), H, W)))
x = self.sk2(x, self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x)))
# x = self.sk2(x, self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \
f"img_size {img_size} should be divided by patch_size {patch_size}."
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.norm = nn.LayerNorm(embed_dim)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
x = self.norm(x)
H, W = H // self.patch_size[0], W // self.patch_size[1]
return x, (H, W)
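# A minimal shape sketch (illustrative only): PatchEmbed turns a (B, C, H, W) image into
# a (B, (H/ps) * (W/ps), embed_dim) token sequence plus the reduced spatial grid, which
# is what the PyramidVisionTransformer below consumes at every stage.
def _example_patch_embed_shapes():
    pe = PatchEmbed(img_size=224, patch_size=4, in_chans=3, embed_dim=64)
    x = torch.randn(1, 3, 224, 224)
    tokens, (h, w) = pe(x)
    assert tokens.shape == (1, 56 * 56, 64) and (h, w) == (56, 56)
    return tokens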
class PyramidVisionTransformer(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], Block=None):
super().__init__()
self.num_classes = num_classes
self.depths = depths
# patch_embed
self.patch_embed1 = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans,
embed_dim=embed_dims[0])
self.patch_embed2 = PatchEmbed(img_size=img_size // 4, patch_size=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1])
self.patch_embed3 = PatchEmbed(img_size=img_size // 8, patch_size=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2])
self.patch_embed4 = PatchEmbed(img_size=img_size // 16, patch_size=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3])
# pos_embed
self.pos_embed1 = nn.Parameter(torch.zeros(1, self.patch_embed1.num_patches, embed_dims[0]))
self.pos_drop1 = nn.Dropout(p=drop_rate)
self.pos_embed2 = nn.Parameter(torch.zeros(1, self.patch_embed2.num_patches, embed_dims[1]))
self.pos_drop2 = nn.Dropout(p=drop_rate)
self.pos_embed3 = nn.Parameter(torch.zeros(1, self.patch_embed3.num_patches, embed_dims[2]))
self.pos_drop3 = nn.Dropout(p=drop_rate)
self.pos_embed4 = nn.Parameter(torch.zeros(1, self.patch_embed4.num_patches + 1, embed_dims[3]))
self.pos_drop4 = nn.Dropout(p=drop_rate)
# transformer encoder
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
self.block1 = nn.ModuleList([Block(
dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[0])
for i in range(depths[0])])
cur += depths[0]
self.block2 = nn.ModuleList([Block(
dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[1])
for i in range(depths[1])])
cur += depths[1]
self.block3 = nn.ModuleList([Block(
dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[2])
for i in range(depths[2])])
cur += depths[2]
self.block4 = nn.ModuleList([Block(
dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
sr_ratio=sr_ratios[3])
for i in range(depths[3])])
self.norm = norm_layer(embed_dims[3])
# cls_token
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[3]))
# classification head
self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity()
# init weights
trunc_normal_(self.pos_embed1, std=.02)
trunc_normal_(self.pos_embed2, std=.02)
trunc_normal_(self.pos_embed3, std=.02)
trunc_normal_(self.pos_embed4, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def reset_drop_path(self, drop_path_rate):
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
cur = 0
for i in range(self.depths[0]):
self.block1[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[0]
for i in range(self.depths[1]):
self.block2[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[1]
for i in range(self.depths[2]):
self.block3[i].drop_path.drop_prob = dpr[cur + i]
cur += self.depths[2]
for i in range(self.depths[3]):
self.block4[i].drop_path.drop_prob = dpr[cur + i]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
# return {'pos_embed', 'cls_token'} # including pos_embed may be better
return {'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
# def _get_pos_embed(self, pos_embed, patch_embed, H, W):
# if H * W == self.patch_embed1.num_patches:
# return pos_embed
# else:
# return F.interpolate(
# pos_embed.reshape(1, patch_embed.H, patch_embed.W, -1).permute(0, 3, 1, 2),
# size=(H, W), mode="bilinear").reshape(1, -1, H * W).permute(0, 2, 1)
def forward_features(self, x):
B = x.shape[0]
# stage 1
x, (H, W) = self.patch_embed1(x)
x = x + self.pos_embed1
x = self.pos_drop1(x)
for blk in self.block1:
x = blk(x, H, W)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
# stage 2
x, (H, W) = self.patch_embed2(x)
x = x + self.pos_embed2
x = self.pos_drop2(x)
for blk in self.block2:
x = blk(x, H, W)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
# stage 3
x, (H, W) = self.patch_embed3(x)
x = x + self.pos_embed3
x = self.pos_drop3(x)
for blk in self.block3:
x = blk(x, H, W)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
# stage 4
x, (H, W) = self.patch_embed4(x)
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed4
x = self.pos_drop4(x)
for blk in self.block4:
x = blk(x, H, W)
x = self.norm(x)
return x[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
@register_model
def pvt_tiny(pretrained=False, **kwargs):
model = PyramidVisionTransformer(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def pvt_small(pretrained=False, **kwargs):
model = PyramidVisionTransformer(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1], Block=Block, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def pvt_small_sk2ffn(pretrained=False, **kwargs):
model = PyramidVisionTransformer(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
Block=Block_sk2ffn, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def pvt_small_sk2ffnx(pretrained=False, **kwargs):
model = PyramidVisionTransformer(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
Block=Block_sk2ffnx, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def pvt_small_sk2(pretrained=False, **kwargs):
model = PyramidVisionTransformer(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
Block=Block_sk2, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def pvt_small_sk2lnffn(pretrained=False, **kwargs):
model = PyramidVisionTransformer(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
Block=Block_sk2lnffn, **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def pvt_medium(pretrained=False, **kwargs):
model = PyramidVisionTransformer(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1],
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def pvt_large(pretrained=False, **kwargs):
model = PyramidVisionTransformer(
patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1],
**kwargs)
model.default_cfg = _cfg()
return model
@register_model
def pvt_huge_v2(pretrained=False, **kwargs):
model = PyramidVisionTransformer(
patch_size=4, embed_dims=[128, 256, 512, 768], num_heads=[2, 4, 8, 12], mlp_ratios=[8, 8, 4, 4], qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 10, 60, 3], sr_ratios=[8, 4, 2, 1],
# drop_rate=0.0, drop_path_rate=0.02)
**kwargs)
model.default_cfg = _cfg()
return model
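# A minimal usage sketch for the factory functions above (illustrative only), assuming a
# 224x224 RGB input: pvt_small is the plain PVT baseline, while pvt_small_sk2ffn swaps
# the FFN residual for the SK2 gate defined earlier.
def _example_forward_pass():
    model = pvt_small().eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(x)
    assert logits.shape == (1, 1000)
    return logits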
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.Identity",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d"
] | 1.7.0 | implus/PVT | 4f70d09f2c0390a9ca2dabf271d725f2d8f75d08 |
0.4 | import sys
import argparse
import copy
import random
import torch
import torch.utils.data as data
from random import shuffle
from collections import OrderedDict
import continual_benchmark.dataloaders.base
import continual_benchmark.agents as agents
import continual_benchmark.dataloaders as dataloaders
from continual_benchmark.dataloaders.datasetGen import SplitGen, PermutedGen
from vae_experiments import models_definition
from vae_experiments import training_functions
from vae_experiments import vae_utils
from visualise import *
exp_values = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101]
parts = len(exp_values)
def run(args):
if not os.path.exists('outputs'):
os.mkdir('outputs')
train_dataset, val_dataset = dataloaders.base.__dict__[args.dataset](args.dataroot, args.skip_normalization,
args.train_aug)
if args.n_permutation > 0:
train_dataset_splits, val_dataset_splits, task_output_space = PermutedGen(train_dataset, val_dataset,
args.n_permutation,
remap_class=not args.no_class_remap)
else:
train_dataset_splits, val_dataset_splits, task_output_space = SplitGen(train_dataset, val_dataset,
first_split_sz=args.first_split_size,
other_split_sz=args.other_split_size,
rand_split=args.rand_split,
remap_class=not args.no_class_remap)
# Calculate constants
n_classes = train_dataset.number_classes
labels_tasks = {}
for task_name, task in train_dataset_splits.items():
labels_tasks[int(task_name)] = task.dataset.class_list
n_tasks = len(labels_tasks)
n_channels = val_dataset.dataset[0][0].size()[0]
in_size = val_dataset.dataset[0][0].size()[1]
agent_config = {'lr': args.base_lr,
'momentum': args.base_momentum,
'nesterov': args.base_nesterov,
'weight_decay': args.base_weight_decay,
'base_schedule': args.base_schedule,
'base_model_type': args.base_model_type,
'base_model_name': args.base_model_name,
'base_model_weights': args.base_model_weights,
'out_dim': {'All': args.base_force_out_dim} if args.base_force_out_dim > 0 else task_output_space,
'optimizer': args.base_optimizer,
'base_print_freq': args.base_print_freq,
'score_generated_images_by_freezed_classifier': args.score_generated_images_by_freezed_classifier,
'gpuid': args.gpuid}
agent = agents.default.NormalNN(agent_config, n_channels=n_channels, in_size=in_size, n_classes=n_classes,
d=args.base_model_d, model_bn=args.base_model_bn, max_pool=args.base_max_pool, n_conv=args.base_n_conv,
dropout_rate=args.base_dropout_rate)
# Decide split ordering
task_names = sorted(list(task_output_space.keys()), key=int)
print('Task order:', task_names)
if args.rand_split_order:
shuffle(task_names)
print('Shuffled task order:', task_names)
acc_table = OrderedDict()
test_acc_table = OrderedDict()
# Prepare VAE
local_vae = models_definition.VAE(latent_size=args.gen_latent_size, d=args.gen_d, p_coding=args.gen_p_coding,
n_dim_coding=args.gen_n_dim_coding, device=device, n_channels=n_channels,
in_size=in_size).to(device)
print(local_vae)
class_table = torch.zeros(n_tasks, n_classes, dtype=torch.long)
global_classes_list = []
global_n_codes = []
for task_id in range(len(task_names)):
print("######### Task number {} #########".format(task_id))
task_name = task_names[task_id]
# VAE
print("Train local VAE model")
n_codes = len(train_dataset_splits[task_name])
train_dataset_loader = torch.utils.data.DataLoader(dataset=train_dataset_splits[task_name],
batch_size=args.gen_batch_size, shuffle=True, drop_last=False)
data_loader_stable = torch.utils.data.DataLoader(dataset=train_dataset_splits[task_name],
batch_size=args.gen_batch_size, shuffle=False, drop_last=False)
data_loader_total = torch.utils.data.DataLoader(dataset=train_dataset_splits[task_name],
batch_size=n_codes, shuffle=False, drop_last=False)
global_n_codes.append(n_codes)
start_id = int(np.sum(global_n_codes[:task_id]))
codes_range = range(start_id, start_id + n_codes)
codes_rep = torch.Tensor()
for exp_value in exp_values:
codes = codes_range * np.array(
exp_value ** np.floor(args.gen_latent_size // parts * np.log(2) / np.log(exp_value)),
dtype=np.longlong) % 2 ** args.gen_latent_size // parts
codes = torch.tensor(
models_definition.unpackbits(np.array(codes, dtype=np.longlong), args.gen_latent_size // parts)).float()
codes_rep = torch.cat([codes_rep, codes], 1)
if args.gen_load_pretrained_models:
codes_rep = (codes_rep.repeat([args.gen_batch_size, 1, 1]) * 2 - 1)
else:
codes_rep = (codes_rep.repeat([args.gen_batch_size, 1, 1]).to(device) * 2 - 1)
if args.gen_load_pretrained_models:
local_vae.load_state_dict(torch.load(args.gen_pretrained_models_dir + f'model{task_id}_local_vae'))
global_classes_list = np.load(args.gen_pretrained_models_dir + f'model{task_id}_classes.npy')
else:
dataloader_with_codes = training_functions.train_local_generator(local_vae, train_dataset_loader,
data_loader_stable,
data_loader_total,
global_classes_list, task_id, codes_rep,
args.gen_batch_size,
n_epochs_pre=args.gen_ae_pre_epochs,
n_epochs=args.gen_ae_epochs)
print("Done training local VAE model")
del codes_rep
if not task_id:
# First task, initializing global decoder as local_vae's decoder
curr_global_decoder = copy.deepcopy(local_vae.decoder)
else:
print("Train global VAE model")
# Retraining global decoder with previous global decoder and local_vae
if args.gen_load_pretrained_models:
curr_global_decoder = models_definition.Decoder(local_vae.latent_size, args.gen_d*4,
p_coding=local_vae.p_coding,
n_dim_coding=local_vae.n_dim_coding,
device=local_vae.device,
n_channels=n_channels, in_size=in_size).to(
local_vae.device)
curr_global_decoder.load_state_dict(
torch.load(args.gen_pretrained_models_dir + f'model{task_id}_curr_decoder'))
else:
curr_global_decoder = training_functions.train_global_decoder(curr_global_decoder, local_vae,
dataloader_with_codes, task_id=task_id,
codes_rep=None, total_n_codes=n_codes,
global_n_codes=global_n_codes,
global_classes_list=global_classes_list,
d=args.gen_d,
n_epochs=args.gen_ae_epochs,
batch_size=args.gen_batch_size,
n_channels=n_channels, in_size=in_size)
torch.cuda.empty_cache()
# Plotting results for already learned tasks
if not args.gen_load_pretrained_models:
vae_utils.plot_results(args.experiment_name, curr_global_decoder, task_id, n_codes, global_n_codes,
global_classes_list)
vae_utils.plot_results(args.experiment_name, local_vae.decoder, task_id, n_codes, global_n_codes,
global_classes_list, 5, "_local_vae")
torch.save(curr_global_decoder.state_dict(), f"results/{args.experiment_name}/model{task_id}_curr_decoder")
torch.save(local_vae.state_dict(), f"results/{args.experiment_name}/model{task_id}_local_vae")
torch.save(agent.model.state_dict(), f"results/{args.experiment_name}/model{task_id}_classifier")
np.save(f"results/{args.experiment_name}/model{task_id}_classes", global_classes_list)
# Classifier
train_loader = data.DataLoader(train_dataset_splits[task_name],
batch_size=args.base_batch_size,
shuffle=True,
num_workers=args.workers)
val_loader = data.DataLoader(val_dataset_splits[task_name],
batch_size=args.base_batch_size,
shuffle=True,
num_workers=args.workers)
agent.learn_batch(train_loader, val_loader, curr_global_decoder, local_vae, class_table, global_classes_list,
task_id, n_codes, global_n_codes, args.new_task_data_processing)
# Classifier validation
acc_table[task_name] = OrderedDict()
for j in range(task_id + 1):
agent.active_neurons = torch.zeros((1, 4000))
val_name = task_names[j]
print('validation split name:', val_name)
val_data = val_dataset_splits[val_name] if not args.base_eval_on_train_set else train_dataset_splits[val_name]
val_loader = data.DataLoader(val_data,
batch_size=args.base_batch_size,
shuffle=True,
num_workers=args.workers)
acc_table[val_name][task_name] = agent.validation(val_loader)
return acc_table, task_names, test_acc_table
def get_args(argv):
parser = argparse.ArgumentParser()
# General
parser.add_argument('--experiment_name', type=str, default='default_run', help='Name of current experiment')
parser.add_argument('--rpath', type=str, default='results/', help='Directory to save results')
parser.add_argument('--gpuid', nargs="+", type=int, default=[0],
help="The list of gpuid, ex:--gpuid 3 1. Negative value means cpu-only")
parser.add_argument('--repeat', type=int, default=1, help="Repeat the experiment N times")
parser.add_argument('--seed', type=int, required=False,
help="Random seed. If defined all random operations will be reproducible")
# Data
parser.add_argument('--dataroot', type=str, default='data', help="The root folder of dataset or downloaded data")
parser.add_argument('--dataset', type=str, default='MNIST', help="MNIST(default)|FashionMNIST|CIFAR10|CIFAR100")
parser.add_argument('--n_permutation', type=int, default=0, help="Enable permuted tests when >0")
parser.add_argument('--first_split_size', type=int, default=2)
parser.add_argument('--other_split_size', type=int, default=2)
parser.add_argument('--rand_split', dest='rand_split', default=False, action='store_true',
help="Randomize the classes in splits")
parser.add_argument('--rand_split_order', dest='rand_split_order', default=False, action='store_true',
help="Randomize the order of splits")
parser.add_argument('--no_class_remap', dest='no_class_remap', default=False, action='store_true',
help="Avoid the dataset with a subset of classes doing the remapping. Ex: [2,5,6 ...] -> [0,1,2 ...]")
parser.add_argument('--skip_normalization', action='store_true', help='Loads dataset without normalization')
parser.add_argument('--train_aug', dest='train_aug', default=False, action='store_true',
help="Allow data augmentation during training")
parser.add_argument('--workers', type=int, default=0, help="#Thread for dataloader")
# Learning options
parser.add_argument('--new_task_data_processing', type=str,
choices=['original', 'original_through_vae', 'generated'],
default='original', help="Determines train data for base network.")
parser.add_argument('--score_generated_images_by_freezed_classifier', default=True, action='store_true',
help="Score generated images by freezed classifier. If false - generator prompts the labels")
# Base network - currently classfier
parser.add_argument('--base_batch_size', type=int, default=100)
parser.add_argument('--base_model_type', type=str, default='mlp',
help="The type (lenet|resnet|cifar_net) of backbone network")
parser.add_argument('--base_model_name', type=str, default='MLP', help="The name of actual model for the backbone")
parser.add_argument('--base_force_out_dim', type=int, default=2,
help="Set 0 to let the task decide the required output dimension")
parser.add_argument('--base_schedule', nargs="+", type=int, default=[2],
help="The list of epoch numbers to reduce learning rate by factor of 0.1. Last number is the end epoch")
parser.add_argument('--base_print_freq', type=float, default=100, help="Print the log at every x iteration")
parser.add_argument('--base_model_weights', type=str, default=None,
help="The path to the file for the model weights (*.pth).")
parser.add_argument('--base_eval_on_train_set', dest='base_eval_on_train_set', default=False, action='store_true',
help="Force the evaluation on train set")
parser.add_argument('--base_model_d', type=int, default=64, help="Size of base network")
parser.add_argument('--base_model_bn', default=True, help="Use batch norm in base network")
parser.add_argument('--base_max_pool', default=False, help="Use max pooling in base network")
parser.add_argument('--base_n_conv', type=int, default=3, help="Num of convs in base network")
parser.add_argument('--base_dropout_rate', type=float, default=0.4, help="Dropout rate in base network")
parser.add_argument('--base_optimizer', type=str, default='Adam',
help="SGD|Adam|RMSprop|amsgrad|Adadelta|Adagrad|Adamax ...")
parser.add_argument('--base_lr', type=float, default=0.01, help="Learning rate for base network")
parser.add_argument('--base_nesterov', action='store_true', help='Whether to use nesterov momentum in base network')
parser.add_argument('--base_momentum', type=float, default=0)
parser.add_argument('--base_weight_decay', type=float, default=0)
# Generative network - currently binary latent autoencoder
parser.add_argument('--gen_batch_size', type=int, default=50)
parser.add_argument('--gen_n_dim_coding', type=int, default=10,
help="Number of bits used to code task id in binary autoencoder")
parser.add_argument('--gen_p_coding', type=int, default=307,
help="Prime number used to calculated codes in binary autoencoder")
parser.add_argument('--gen_latent_size', type=int, default=200, help="Latent size in binary autoencoder")
parser.add_argument('--gen_d', type=int, default=32, help="Size of binary autoencoder")
parser.add_argument('--gen_ae_pre_epochs', type=int, default=20,
help="Number of epochs to train autoencoder before freezing the codes")
parser.add_argument('--gen_ae_epochs', type=int, default=200, help="Number of epochs to train autoencoder")
parser.add_argument('--gen_load_pretrained_models', default=False, help="Load pretrained generative models")
parser.add_argument('--gen_pretrained_models_dir', type=str, default="results/pretrained_models",
help="Directory of pretrained generative models")
args = parser.parse_args(argv)
return args
if __name__ == '__main__':
args = get_args(sys.argv[1:])
torch.cuda.set_device(0)
device = torch.device("cuda")
if args.seed:
print("Using manual seed = {}".format(args.seed))
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else:
print("WARNING: Not using manual seed - your experiments will not be reproducible")
acc_val, acc_test = {}, {}
os.makedirs(f"{args.rpath}{args.experiment_name}", exist_ok=True)
with open(f"{args.rpath}{args.experiment_name}/args.txt", "w") as text_file:
text_file.write(str(args))
for r in range(args.repeat):
acc_val[r], _, acc_test[r] = run(args)
np.save(f"{args.rpath}{args.experiment_name}/acc_val.npy", acc_val)
np.save(f"{args.rpath}{args.experiment_name}/acc_test.npy", acc_test)
plot_final_results([args.experiment_name])
| [
"torch.zeros",
"torch.device",
"torch.cat",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.empty_cache",
"torch.utils.data.DataLoader",
"torch.load",
"torch.Tensor"
] | 0.4.1 | KamilDeja/BinPlay | a8626e0bd85ed2f0c064b0c78c95a0bc8c0eb14e |
1.0 | import os
import gc
import json
import time
import tqdm
import argparse
import datetime
import torch.distributed as dist
import torch.utils.data.distributed
from warpctc_pytorch import CTCLoss
from novograd import (AdamW,
Novograd)
from linknet import (SemsegLoss,
MaskSimilarity)
from decoder import GreedyDecoder
from model import DeepSpeech, supported_rnns
from data.utils import reduce_tensor, get_cer_wer
from data.data_loader_aug import (SpectrogramDataset,
BucketingSampler,
BucketingLenSampler,
DistributedBucketingSampler)
import torch
import warnings
from torch._six import inf
tq = tqdm.tqdm
VISIBLE_DEVICES = (os.environ.get('CUDA_VISIBLE_DEVICES') or '0').split(',')
parser = argparse.ArgumentParser(description='DeepSpeech training')
parser.add_argument('--train-manifest', metavar='DIR',
help='path to train manifest csv', default='data/train_manifest.csv')
parser.add_argument('--cache-dir', metavar='DIR',
help='path to save temp audio', default='data/cache/')
parser.add_argument('--train-val-manifest', metavar='DIR',
help='path to train validation manifest csv', default='')
parser.add_argument('--val-manifest', metavar='DIR',
help='path to validation manifest csv', default='data/val_manifest.csv')
parser.add_argument('--curriculum', metavar='DIR',
help='path to curriculum file', default='')
parser.add_argument('--use-curriculum', action='store_true', default=False)
parser.add_argument('--curriculum-ratio', default=0.5, type=float)
parser.add_argument('--cl-point', default=0.1, type=float)
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--batch-size', default=20, type=int, help='Batch size for training')
parser.add_argument('--val-batch-size', default=20, type=int, help='Batch size for training')
parser.add_argument('--num-workers', default=4, type=int, help='Number of workers used in data-loading')
parser.add_argument('--labels-path', default='labels.json', help='Contains all characters for transcription')
parser.add_argument('--phonemes-path', default='phonemes_ru.json', help='Contains all phonemes for the Russian language')
parser.add_argument('--use-bpe', dest='use_bpe', action='store_true', help='Use sentencepiece BPE tokens')
parser.add_argument('--sp-model', dest='sp_model', default='data/spm_train_v05_cleaned_asr_10s_phoneme.model',
type=str, help='Pre-trained sentencepiece model')
parser.add_argument('--use-phonemes', action='store_true', default=False)
parser.add_argument('--phonemes-only', action='store_true', default=False)
parser.add_argument('--omit-spaces', action='store_true', default=False)
parser.add_argument('--subword-regularization', action='store_true', default=False)
parser.add_argument('--batch-similar-lens', dest='batch_similar_lens', action='store_true',
help='Force usage of sampler that batches items with similar duration together')
parser.add_argument('--pytorch-mel', action='store_true', help='Use pytorch based STFT + MEL')
parser.add_argument('--pytorch-stft', action='store_true', help='Use pytorch based STFT')
parser.add_argument('--denoise', action='store_true', help='Train a denoising head')
parser.add_argument('--use-attention', action='store_true', help='Use attention based decoder instead of CTC')
parser.add_argument('--double-supervision', action='store_true', help='Use both CTC and attention in sequence')
parser.add_argument('--naive-split', action='store_true', help='Use a naive DS2 inspired syllable split')
parser.add_argument('--grapheme-phoneme', action='store_true', help='Use both phonemes and graphemes with BPE to train from scratch')
parser.add_argument('--window-size', default=.02, type=float, help='Window size for spectrogram in seconds')
parser.add_argument('--window-stride', default=.01, type=float, help='Window stride for spectrogram in seconds')
parser.add_argument('--window', default='hamming', help='Window type for spectrogram generation')
parser.add_argument('--hidden-size', default=800, type=int, help='Hidden size of RNNs')
parser.add_argument('--cnn-width', default=256, type=int, help='w2l-like network width')
parser.add_argument('--kernel-size', default=7, type=int, help='cnn kernel size')
parser.add_argument('--hidden-layers', default=6, type=int, help='Number of RNN layers')
parser.add_argument('--rnn-type', default='gru', help='Type of the RNN. rnn|gru|lstm are supported')
parser.add_argument('--decoder-layers', default=4, type=int)
parser.add_argument('--decoder-girth', default=1, type=int)
parser.add_argument('--dropout', default=0, type=float, help='Fixed dropout for CNN based models')
parser.add_argument('--epochs', default=70, type=int, help='Number of training epochs')
parser.add_argument('--cuda', dest='cuda', action='store_true', help='Use cuda to train model')
parser.add_argument('--lr', '--learning-rate', default=3e-4, type=float, help='initial learning rate')
parser.add_argument('--optimizer', default='sgd', help='Optimizer - sgd or adam')
parser.add_argument('--weight-decay', default=0, help='Weight decay for SGD', type=float)
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--batch-norm-momentum', default=0.1, type=float, help='BatchNorm momentum')
parser.add_argument('--max-norm', default=100, type=int, help='Norm cutoff to prevent explosion of gradients')
parser.add_argument('--norm-warmup-epochs', default=1000, type=int, help='Apply gradient clipping only before this epoch')
parser.add_argument('--gradient-accumulation-steps', default=1, type=int, help='Accumulate gradients for some time first')
parser.add_argument('--learning-anneal', default=1.1, type=float, help='Annealing applied to learning rate every epoch')
parser.add_argument('--checkpoint-anneal', default=1.0, type=float,
help='Annealing applied to learning rate every checkpoint')
parser.add_argument('--silent', dest='silent', action='store_true', help='Turn off progress tracking per iteration')
parser.add_argument('--checkpoint', dest='checkpoint', action='store_true', help='Enables checkpoint saving of model')
parser.add_argument('--checkpoint-per-samples', default=0, type=int, help='Save checkpoint per samples. 0 means never save')
parser.add_argument('--visdom', dest='visdom', action='store_true', help='Turn on visdom graphing')
parser.add_argument('--enorm', dest='enorm', action='store_true', help='Turn on enorm ( https://github.com/facebookresearch/enorm )')
parser.add_argument('--tensorboard', dest='tensorboard', action='store_true', help='Turn on tensorboard graphing')
parser.add_argument('--log-dir', default='visualize/deepspeech_final', help='Location of tensorboard log')
parser.add_argument('--log-params', dest='log_params', action='store_true', help='Log parameter values and gradients')
parser.add_argument('--id', default='Deepspeech training', help='Identifier for visdom/tensorboard run')
parser.add_argument('--save-folder', default='models/', help='Location to save epoch models')
parser.add_argument('--continue-from', default='', help='Continue from checkpoint model')
parser.add_argument('--norm', default='max_frame', action="store",
help='Normalize sounds. Choices: "mean", "frame", "max_frame", "none"')
parser.add_argument('--finetune', dest='finetune', action='store_true',
help='Finetune the model from checkpoint "continue_from"')
parser.add_argument('--augment', dest='augment', action='store_true', help='Use random tempo and gain perturbations.')
parser.add_argument('--noise-dir', default=None,
                    help='Directory with noise files to inject into audio. If left at the default, no noise is injected')
parser.add_argument('--noise-prob', default=0.4, type=float, help='Probability of noise being added per sample')
parser.add_argument('--aug-type', default=0, type=int, help='Type of augs to use')
parser.add_argument('--aug-prob-8khz', default=0, type=float, help='Probability of dropping half of stft frequencies, robustness to 8kHz audio')
parser.add_argument('--aug-prob-spect', default=0, type=float, help='Probability of applying spectrogram based augmentations')
parser.add_argument('--noise-min', default=0.0,
help='Minimum noise level to sample from. (1.0 means all noise, not original signal)', type=float)
parser.add_argument('--noise-max', default=0.5,
                    help='Maximum noise level to sample from (1.0 is the maximum)', type=float)
parser.add_argument('--no-shuffle', dest='no_shuffle', action='store_true',
help='Turn off shuffling and sample from dataset based on sequence length (smallest to largest)')
parser.add_argument('--no-sortaGrad', dest='no_sorta_grad', action='store_true',
help='Turn off ordering of dataset on sequence length for the first epoch.')
parser.add_argument('--reverse-sort', dest='reverse_sort', action='store_true',
                    help='Reverse the dataset ordering by sequence length for the first epoch (hack to test maximum memory load)')
parser.add_argument('--no-bidirectional', dest='bidirectional', action='store_false', default=True,
help='Turn off bi-directional RNNs, introduces lookahead convolution')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:1550', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str, help='distributed backend')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--rank', default=0, type=int,
help='The rank of this process')
parser.add_argument('--gpu-rank', default=None,
help='If using distributed parallel for multi-gpu, sets the GPU for the process')
parser.add_argument('--data-parallel', dest='data_parallel', action='store_true',
help='Use data parallel')
parser.add_argument('--use-lookahead', dest='use_lookahead', action='store_true',
help='Use look ahead optimizer')
torch.manual_seed(123456)
torch.cuda.manual_seed_all(123456)
def to_np(x):
return x.data.cpu().numpy()
def clip_grad_norm_(parameters, max_norm, norm_type=2):
r"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
    if norm_type == float('inf'):
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
total_norm = total_norm ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
# print(clip_coef)
if clip_coef < 1:
for p in parameters:
p.grad.data.mul_(clip_coef)
return total_norm
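# A minimal, hedged usage sketch of the local clip_grad_norm_ helper above:
# it backpropagates a dummy loss through a throwaway linear layer and clips
# the total gradient 2-norm in place. The layer and numbers are illustrative
# only; this function is defined for reference and never called during training.
def _example_clip_grad_norm():
    layer = torch.nn.Linear(4, 2)
    loss = layer(torch.randn(8, 4)).pow(2).sum()
    loss.backward()
    # returns the pre-clipping total norm; gradients are rescaled in place
    # so that their combined 2-norm does not exceed max_norm
    total_norm = clip_grad_norm_(layer.parameters(), max_norm=1.0)
    return total_norm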
def calc_grad_norm(parameters, max_norm, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
    if norm_type == float('inf'):
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
total_norm = total_norm ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
return clip_coef
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
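# Hedged usage sketch for AverageMeter: keep a running average of per-batch
# losses, weighting each update by the batch size. Values are made up; this
# helper is for illustration only and is never called by the training loop,
# which instantiates its own meters further below.
def _example_average_meter():
    meter = AverageMeter()
    for batch_loss, batch_size in [(2.0, 16), (1.0, 16), (0.5, 32)]:
        meter.update(batch_loss, n=batch_size)
    # meter.avg == (2.0 * 16 + 1.0 * 16 + 0.5 * 32) / 64 == 1.0
    return meter.avg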
class MultipleOptimizer(object):
def __init__(self, op):
self.optimizers = op
def zero_grad(self):
for op in self.optimizers:
op.zero_grad()
def step(self):
for op in self.optimizers:
op.step()
def state_dict(self):
out = [op.state_dict() for op in self.optimizers]
return out
def load_state_dict(self,
states):
assert len(states) == len(self.optimizers)
for i in range(len(self.optimizers)):
self.optimizers[i].load_state_dict(states[i])
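# Hedged sketch of how MultipleOptimizer composes two optimizers over disjoint
# parameter groups; the double-supervision path in build_optimizer below pairs
# SGD for the CTC branch with Adam for the s2s decoder in the same way. The two
# linear layers are stand-ins, and this function is never called.
def _example_multiple_optimizer():
    ctc_head = torch.nn.Linear(8, 4)
    s2s_head = torch.nn.Linear(8, 4)
    optim = MultipleOptimizer([
        torch.optim.SGD(ctc_head.parameters(), lr=3e-4, momentum=0.9),
        torch.optim.Adam(s2s_head.parameters(), lr=1e-4),
    ])
    loss = ctc_head(torch.randn(2, 8)).sum() + s2s_head(torch.randn(2, 8)).sum()
    optim.zero_grad()
    loss.backward()
    optim.step()  # steps both wrapped optimizers
    return optim.state_dict()  # a list with one state dict per wrapped optimizer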
def build_optimizer(args_,
parameters_=None,
model=None):
# import aggmo
# return aggmo.AggMo(model.parameters(), args_.lr, betas=[0, 0.6, 0.9])
if args_.weight_decay > 0:
print('Using weight decay {} for SGD'.format(args_.weight_decay))
if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:
import itertools
adam_lr = 1e-4 # / 10
sgd_lr = args_.lr
print('Using double supervision, SGD with clipping for CTC, ADAM for s2s')
print('SGD LR {} / ADAM LR {}'.format(sgd_lr, adam_lr))
if 'transformer' in args.rnn_type:
print('Using transformer-type double optimizer')
params_ctc = [model.rnns.layers.parameters()]
params_adam = [model.rnns.decoder.parameters(),
model.fc.parameters()]
else:
params_ctc = [model.rnns.layers.parameters(),
model.rnns.ctc_decoder.parameters(),
model.rnns.ctc_fc.parameters()]
params_adam = [model.rnns.s2s_decoder.parameters()]
ctc_optimizer = torch.optim.SGD(itertools.chain(*params_ctc),
lr=args_.lr,
momentum=args_.momentum,
nesterov=True)
s2s_optimizer = torch.optim.Adam(itertools.chain(*params_adam),
lr=adam_lr)
return MultipleOptimizer([ctc_optimizer, s2s_optimizer])
elif args_.optimizer == 'sgd':
print('Using SGD')
try:
base_optimizer = torch.optim.SGD(parameters_, lr=args_.lr,
momentum=args_.momentum, nesterov=True,
weight_decay=args_.weight_decay)
if args_.use_lookahead:
print('Using SGD + Lookahead')
from lookahead import Lookahead
return Lookahead(base_optimizer=base_optimizer,
k=5,
alpha=0.5)
return base_optimizer
        except:
            # fall back to plain SGD without Nesterov momentum (also hit if Lookahead is unavailable)
return torch.optim.SGD(parameters_, lr=args_.lr,
momentum=args_.momentum, nesterov=False,
weight_decay=args_.weight_decay)
    elif args_.optimizer == 'adam':
        print('Using ADAM')
        return torch.optim.Adam(parameters_, lr=args_.lr)
    elif args_.optimizer == 'novograd':
        print('Using Novograd')
        return Novograd(parameters_, lr=args_.lr)
    elif args_.optimizer == 'adamw':
        print('Using ADAMW')
        return AdamW(parameters_, lr=args_.lr)
viz = None
tensorboard_writer = None
class PlotWindow:
def __init__(self, title, suffix, log_x=False, log_y=False):
self.loss_results = torch.Tensor(10000)
self.cer_results = torch.Tensor(10000)
self.wer_results = torch.Tensor(10000)
self.epochs = torch.arange(1, 10000)
self.viz_window = None
        self.tb_subplot = '/' + suffix
global viz, tensorboard_writer
hour_now = str(datetime.datetime.now()).split('.', 1)[0][:-3]
self.opts = dict(title=title + ': ' + hour_now, ylabel='', xlabel=suffix, legend=['Loss', 'WER', 'CER'])
self.opts['layoutopts'] = {'plotly': {}}
if log_x:
self.opts['layoutopts']['plotly'] = {'xaxis': {'type': 'log'}}
if log_y:
self.opts['layoutopts']['plotly'] = {'yaxis': {'type': 'log'}}
if args.visdom and is_leader:
if viz is None:
from visdom import Visdom
viz = Visdom()
if args.tensorboard and is_leader:
os.makedirs(args.log_dir, exist_ok=True)
if tensorboard_writer is None:
from tensorboardX import SummaryWriter
tensorboard_writer = SummaryWriter(args.log_dir)
def plot_history(self, position):
global viz, tensorboard_writer
if is_leader and args.visdom:
# Add previous scores to visdom graph
x_axis = self.epochs[0:position]
y_axis = torch.stack(
(self.loss_results[0:position],
self.wer_results[0:position],
self.cer_results[0:position]),
dim=1)
self.viz_window = viz.line(
X=x_axis,
Y=y_axis,
opts=self.opts,
)
if is_leader and args.tensorboard:
# Previous scores to tensorboard logs
for i in range(position):
values = {
'Avg Train Loss': self.loss_results[i],
'Avg WER': self.wer_results[i],
'Avg CER': self.cer_results[i]
}
tensorboard_writer.add_scalars(args.id+self.tb_subplot,
values, i + 1)
def plot_progress(self, epoch, avg_loss, cer_avg, wer_avg):
global viz, tensorboard_writer
if args.visdom and is_leader:
x_axis = self.epochs[0:epoch + 1]
y_axis = torch.stack(
(self.loss_results[0:epoch + 1],
self.wer_results[0:epoch + 1],
self.cer_results[0:epoch + 1]), dim=1)
if self.viz_window is None:
self.viz_window = viz.line(
X=x_axis,
Y=y_axis,
opts=self.opts,
)
else:
viz.line(
X=x_axis.unsqueeze(0).expand(y_axis.size(1), x_axis.size(0)).transpose(0, 1), # Visdom fix
Y=y_axis,
win=self.viz_window,
update='replace',
)
if args.tensorboard and is_leader:
values = {
'Avg Train Loss': avg_loss,
'Avg WER': wer_avg,
'Avg CER': cer_avg
}
tensorboard_writer.add_scalars(args.id+self.tb_subplot,
values,
epoch + 1)
if args.log_params:
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
tensorboard_writer.add_histogram(tag, to_np(value), epoch + 1)
tensorboard_writer.add_histogram(tag + '/grad', to_np(value.grad), epoch + 1)
class LRPlotWindow:
def __init__(self, title, suffix, log_x=False, log_y=False):
self.loss_results = torch.Tensor(10000)
self.epochs = torch.Tensor(10000)
self.viz_window = None
self.suffix = suffix
        self.tb_subplot = '/' + suffix
global viz, tensorboard_writer
hour_now = str(datetime.datetime.now()).split('.', 1)[0][:-3]
self.opts = dict(title=title + ': ' + hour_now, ylabel='', xlabel=suffix, legend=['Loss'])
self.opts['layoutopts'] = {'plotly': {}}
if log_x:
self.opts['layoutopts']['plotly'] = {'xaxis': {'type': 'log'}}
if log_y:
self.opts['layoutopts']['plotly'] = {'yaxis': {'type': 'log'}}
if args.visdom and is_leader:
if viz is None:
from visdom import Visdom
viz = Visdom()
if args.tensorboard and is_leader:
os.makedirs(args.log_dir, exist_ok=True)
if tensorboard_writer is None:
from tensorboardX import SummaryWriter
tensorboard_writer = SummaryWriter(args.log_dir)
def plot_progress(self, epoch, avg_loss, cer_avg, wer_avg):
global viz, tensorboard_writer
if args.visdom and is_leader:
x_axis = self.epochs[0:epoch + 1]
y_axis = torch.stack((
self.loss_results[0:epoch + 1],
), dim=1)
if self.viz_window is None:
self.viz_window = viz.line(
X=x_axis,
Y=y_axis,
opts=self.opts,
)
else:
viz.line(
X=x_axis,
Y=y_axis,
win=self.viz_window,
update='replace',
)
if args.tensorboard and is_leader:
values = {
'Avg Train Loss': avg_loss,
}
tensorboard_writer.add_scalars(args.id+self.tb_subplot,
values, epoch + 1)
if args.log_params:
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
tensorboard_writer.add_histogram(tag, to_np(value), epoch + 1)
tensorboard_writer.add_histogram(tag + '/grad', to_np(value.grad), epoch + 1)
def get_lr():
if args.use_lookahead:
return optimizer.optimizer.state_dict()['param_groups'][0]['lr']
if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:
# SGD state
optim_state = optimizer.optimizers[0].state_dict()
else:
optim_state = optimizer.state_dict()
return optim_state['param_groups'][0]['lr']
def set_lr(lr):
print('Learning rate annealed to: {lr:.6g}'.format(lr=lr))
if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:
# ADAM's LR typically is set 10x lower than SGD
sgd_optim_state = optimizer.optimizers[0].state_dict()
sgd_optim_state['param_groups'][0]['lr'] = lr
optimizer.optimizers[0].load_state_dict(sgd_optim_state)
adam_optim_state = optimizer.optimizers[1].state_dict()
# always fixed for adam
adam_optim_state['param_groups'][0]['lr'] = 1e-4
optimizer.optimizers[1].load_state_dict(adam_optim_state)
elif args.use_lookahead:
optim_state = optimizer.optimizer.state_dict()
optim_state['param_groups'][0]['lr'] = lr
optimizer.optimizer.load_state_dict(optim_state)
else:
optim_state = optimizer.state_dict()
optim_state['param_groups'][0]['lr'] = lr
optimizer.load_state_dict(optim_state)
def check_model_quality(epoch, checkpoint, train_loss, train_cer, train_wer):
gc.collect()
torch.cuda.empty_cache()
val_cer_sum, val_wer_sum, val_loss_sum = 0, 0, 0
num_chars, num_words, num_losses = 0, 0, 0
model.eval()
with torch.no_grad():
for i, data in tq(enumerate(test_loader), total=len(test_loader)):
            # enable this branch if full phoneme decoding is ever required
if False:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
phoneme_targets,
phoneme_target_sizes) = data
elif args.denoise:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
mask_targets) = data
else:
inputs, targets, filenames, input_percentages, target_sizes = data
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
# unflatten targets
split_targets = []
offset = 0
for size in target_sizes:
split_targets.append(targets[offset:offset + size])
offset += size
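            # Worked example of the unflattening above (illustrative values):
            # targets = [5, 2, 9, 1, 7] with target_sizes = [2, 3] yields
            # split_targets = [tensor([5, 2]), tensor([9, 1, 7])],
            # i.e. one transcript tensor per utterance in the batch.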
if args.use_attention:
batch_size = inputs.size(0)
max_len = max(target_sizes)
# use CTC blank as pad token
# ctc blank has an index of zero
trg = torch.zeros(batch_size,
max_len)
assert len(target_sizes) == batch_size
for _, split_target in enumerate(split_targets):
trg[_, :target_sizes[_]] = split_target
trg = trg.long().to(device)
# trg_teacher_forcing = trg[:, :-1]
trg_val = trg
inputs = inputs.to(device)
if args.use_phonemes or args.grapheme_phoneme:
(logits, probs,
output_sizes,
phoneme_logits, phoneme_probs) = model(inputs, input_sizes)
elif args.denoise:
logits, probs, output_sizes, mask_logits = model(inputs, input_sizes)
elif args.use_attention:
logits, output_sizes = model(inputs,
lengths=input_sizes)
# for our purposes they are the same
probs = logits
elif args.double_supervision:
ctc_logits, s2s_logits, output_sizes = model(inputs,
lengths=input_sizes)
# s2s decoder is the final decoder
probs = s2s_logits
else:
logits, probs, output_sizes = model(inputs, input_sizes)
if args.use_attention:
                # this is somewhat approximate: the loss could be computed with
                # teacher-forced unrolling, or we simply assume the network
                # produces outputs of similar length to the ground truth
short_logits = logits[:, :trg_val.size(1), :].contiguous()
loss = criterion(short_logits.view(-1,
short_logits.size(-1)),
trg_val.contiguous().view(-1))
loss = loss / sum(target_sizes) # average the loss by number of tokens
loss = loss.to(device)
elif args.double_supervision:
# do not bother with loss here
loss = 0
loss_value = 0
else:
loss = criterion(logits.transpose(0, 1), targets, output_sizes.cpu(), target_sizes)
loss = loss / inputs.size(0) # average the loss by minibatch
inf = float("inf")
if args.distributed:
loss_value = reduce_tensor(loss, args.world_size).item()
elif args.double_supervision:
pass
else:
loss_value = loss.item()
if loss_value == inf or loss_value == -inf:
print("WARNING: received an inf loss, setting loss value to 1000")
loss_value = 1000
loss_value = float(loss_value)
val_loss_sum = (val_loss_sum * 0.998 + loss_value * 0.002) # discount earlier losses
val_loss_sum += loss_value
num_losses += 1
decoded_output, _ = decoder.decode(probs, output_sizes,
use_attention=args.use_attention or args.double_supervision)
target_strings = decoder.convert_to_strings(split_targets)
for x in range(len(target_strings)):
transcript, reference = decoded_output[x][0], target_strings[x][0]
wer, cer, wer_ref, cer_ref = get_cer_wer(decoder, transcript, reference)
if x < 1:
print("CER: {:6.2f}% WER: {:6.2f}% Filename: {}".format(cer/cer_ref*100, wer/wer_ref*100, filenames[x]))
print('Reference:', reference, '\nTranscript:', transcript)
times_used = test_dataset.curriculum[filenames[x]]['times_used']+1
test_dataset.update_curriculum(filenames[x],
reference, transcript,
None,
cer / cer_ref, wer / wer_ref,
times_used=times_used)
val_wer_sum += wer
val_cer_sum += cer
num_words += wer_ref
num_chars += cer_ref
if args.double_supervision:
del inputs, targets, input_percentages, input_sizes
del probs, output_sizes, target_sizes, loss
del ctc_logits, s2s_logits
del split_targets
else:
del inputs, targets, input_percentages, input_sizes
del logits, probs, output_sizes, target_sizes, loss
del split_targets
if args.cuda:
torch.cuda.synchronize()
val_wer = 100 * val_wer_sum / num_words
val_cer = 100 * val_cer_sum / num_chars
print('Validation Summary Epoch: [{0}]\t'
'Average WER {wer:.3f}\t'
'Average CER {cer:.3f}\t'.format(epoch + 1, wer=val_wer, cer=val_cer))
val_loss = val_loss_sum / num_losses
plots.loss_results[epoch] = train_loss
plots.wer_results[epoch] = train_wer
plots.cer_results[epoch] = train_cer
plots.epochs[epoch] = epoch + 1
checkpoint_plots.loss_results[checkpoint] = val_loss
checkpoint_plots.wer_results[checkpoint] = val_wer
checkpoint_plots.cer_results[checkpoint] = val_cer
checkpoint_plots.epochs[checkpoint] = checkpoint + 1
plots.plot_progress(epoch, train_loss, train_cer, train_wer)
checkpoint_plots.plot_progress(checkpoint, val_loss, val_cer, val_wer)
if args.checkpoint_anneal != 1.0:
global lr_plots
lr_plots.loss_results[checkpoint] = val_loss
lr_plots.epochs[checkpoint] = get_lr()
zero_loss = lr_plots.loss_results == 0
lr_plots.loss_results[zero_loss] = val_loss
lr_plots.epochs[zero_loss] = get_lr()
lr_plots.plot_progress(checkpoint, val_loss, val_cer, val_wer)
# only if trainval manifest provided
# separate scope not to mess with general flow too much
if args.train_val_manifest != '':
calculate_trainval_quality_metrics(checkpoint,
epoch,
trainval_loader,
trainval_checkpoint_plots)
return val_wer, val_cer
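# Hedged sketch of the error aggregation used in check_model_quality above:
# per-sample edit counts (wer, cer) and reference lengths (wer_ref, cer_ref)
# are summed separately, and corpus-level percentages divide the two sums.
# The numbers are made up for illustration; this helper is never called.
def _example_error_aggregation():
    samples = [
        # (word_errors, ref_word_count, char_errors, ref_char_count)
        (1, 4, 3, 20),
        (0, 6, 1, 30),
    ]
    wer_sum = sum(s[0] for s in samples)
    num_words = sum(s[1] for s in samples)
    cer_sum = sum(s[2] for s in samples)
    num_chars = sum(s[3] for s in samples)
    val_wer = 100 * wer_sum / num_words  # 100 * 1 / 10 = 10.0
    val_cer = 100 * cer_sum / num_chars  # 100 * 4 / 50 = 8.0
    return val_wer, val_cer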
def calculate_trainval_quality_metrics(checkpoint,
epoch,
loader,
plots_handle):
val_cer_sum, val_wer_sum, val_loss_sum = 0, 0, 0
num_chars, num_words, num_losses = 0, 0, 0
model.eval()
with torch.no_grad():
for i, data in tq(enumerate(loader), total=len(loader)):
            # enable this branch if full phoneme decoding is ever required
if False:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
phoneme_targets,
phoneme_target_sizes) = data
elif args.denoise:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
mask_targets) = data
else:
inputs, targets, filenames, input_percentages, target_sizes = data
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
# unflatten targets
split_targets = []
offset = 0
for size in target_sizes:
split_targets.append(targets[offset:offset + size])
offset += size
if args.use_attention:
batch_size = inputs.size(0)
max_len = max(target_sizes)
# use CTC blank as pad token
# ctc blank has an index of zero
trg = torch.zeros(batch_size,
max_len)
assert len(target_sizes) == batch_size
for _, split_target in enumerate(split_targets):
trg[_, :target_sizes[_]] = split_target
trg = trg.long().to(device)
# trg_teacher_forcing = trg[:, :-1]
trg_val = trg
inputs = inputs.to(device)
if args.use_phonemes:
(logits, probs,
output_sizes,
phoneme_logits, phoneme_probs) = model(inputs, input_sizes)
elif args.denoise:
logits, probs, output_sizes, mask_logits = model(inputs, input_sizes)
elif args.use_attention:
logits, output_sizes = model(inputs,
lengths=input_sizes)
# for our purposes they are the same
probs = logits
elif args.double_supervision:
ctc_logits, s2s_logits, output_sizes = model(inputs,
lengths=input_sizes)
# s2s decoder is the final decoder
probs = s2s_logits
else:
logits, probs, output_sizes = model(inputs, input_sizes)
if args.use_attention:
                # this is somewhat approximate: the loss could be computed with
                # teacher-forced unrolling, or we simply assume the network
                # produces outputs of similar length to the ground truth;
                # some annotation edge cases can still break this badly,
                # hence the failsafe below
max_loss_len = min(trg_val.size(1),
logits.size(1))
short_logits = logits[:, :max_loss_len, :].contiguous()
short_trg = trg_val[:, :max_loss_len].contiguous()
loss = criterion(short_logits.view(-1,
short_logits.size(-1)),
short_trg.view(-1))
loss = loss / sum(target_sizes) # average the loss by number of tokens
loss = loss.to(device)
elif args.double_supervision:
# do not bother with loss here
loss = 0
loss_value = 0
else:
loss = criterion(logits.transpose(0, 1), targets, output_sizes.cpu(), target_sizes)
loss = loss / inputs.size(0) # average the loss by minibatch
inf = float("inf")
if args.distributed:
loss_value = reduce_tensor(loss, args.world_size).item()
elif args.double_supervision:
pass
else:
loss_value = loss.item()
if loss_value == inf or loss_value == -inf:
print("WARNING: received an inf loss, setting loss value to 1000")
loss_value = 1000
loss_value = float(loss_value)
val_loss_sum = (val_loss_sum * 0.998 + loss_value * 0.002) # discount earlier losses
val_loss_sum += loss_value
num_losses += 1
decoded_output, _ = decoder.decode(probs, output_sizes,
use_attention=args.use_attention or args.double_supervision)
target_strings = decoder.convert_to_strings(split_targets)
for x in range(len(target_strings)):
transcript, reference = decoded_output[x][0], target_strings[x][0]
wer, cer, wer_ref, cer_ref = get_cer_wer(decoder, transcript, reference)
if x < 1:
print("CER: {:6.2f}% WER: {:6.2f}% Filename: {}".format(cer/cer_ref*100, wer/wer_ref*100, filenames[x]))
print('Reference:', reference, '\nTranscript:', transcript)
times_used = trainval_dataset.curriculum[filenames[x]]['times_used']+1
trainval_dataset.update_curriculum(filenames[x],
reference, transcript,
None,
cer / cer_ref, wer / wer_ref,
times_used=times_used)
val_wer_sum += wer
val_cer_sum += cer
num_words += wer_ref
num_chars += cer_ref
if args.double_supervision:
del inputs, targets, input_percentages, input_sizes
del probs, output_sizes, target_sizes, loss
del ctc_logits, s2s_logits
del split_targets
else:
del inputs, targets, input_percentages, input_sizes
del logits, probs, output_sizes, target_sizes, loss
del split_targets
if args.cuda:
torch.cuda.synchronize()
val_wer = 100 * val_wer_sum / num_words
val_cer = 100 * val_cer_sum / num_chars
print('TrainVal Summary Epoch: [{0}]\t'
'Average WER {wer:.3f}\t'
'Average CER {cer:.3f}\t'.format(epoch + 1, wer=val_wer, cer=val_cer))
val_loss = val_loss_sum / num_losses
plots_handle.loss_results[checkpoint] = val_loss
plots_handle.wer_results[checkpoint] = val_wer
plots_handle.cer_results[checkpoint] = val_cer
plots_handle.epochs[checkpoint] = checkpoint + 1
plots_handle.plot_progress(checkpoint, val_loss, val_cer, val_wer)
def save_validation_curriculums(save_folder,
checkpoint,
epoch,
iteration=0):
if iteration>0:
test_path = '%s/test_checkpoint_%04d_epoch_%02d_iter_%05d.csv' % (save_folder, checkpoint + 1, epoch + 1, iteration + 1)
else:
test_path = '%s/test_checkpoint_%04d_epoch_%02d.csv' % (save_folder, checkpoint + 1, epoch + 1)
print("Saving test curriculum to {}".format(test_path))
test_dataset.save_curriculum(test_path)
if args.train_val_manifest != '':
if iteration>0:
trainval_path = '%s/trainval_checkpoint_%04d_epoch_%02d_iter_%05d.csv' % (save_folder, checkpoint + 1, epoch + 1, iteration + 1)
else:
trainval_path = '%s/trainval_checkpoint_%04d_epoch_%02d.csv' % (save_folder, checkpoint + 1, epoch + 1)
print("Saving trainval curriculum to {}".format(trainval_path))
trainval_dataset.save_curriculum(trainval_path)
class Trainer:
def __init__(self):
self.end = time.time()
self.train_wer = 0
self.train_cer = 0
self.num_words = 0
self.num_chars = 0
def reset_scores(self):
self.train_wer = 0
self.train_cer = 0
self.num_words = 0
self.num_chars = 0
def get_cer(self):
return 100. * self.train_cer / (self.num_chars or 1)
def get_wer(self):
return 100. * self.train_wer / (self.num_words or 1)
def train_batch(self, epoch, batch_id, data):
if args.use_phonemes:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
phoneme_targets,
phoneme_target_sizes) = data
elif args.denoise:
(inputs,
targets,
filenames,
input_percentages,
target_sizes,
mask_targets) = data
mask_targets = mask_targets.squeeze(1).to(device)
elif args.double_supervision:
(inputs,
targets, s2s_targets,
filenames, input_percentages,
target_sizes, s2s_target_sizes) = data
else:
inputs, targets, filenames, input_percentages, target_sizes = data
input_sizes = input_percentages.mul_(int(inputs.size(3))).int()
# measure data loading time
data_time.update(time.time() - self.end)
inputs = inputs.to(device)
input_sizes = input_sizes.to(device)
split_targets = []
offset = 0
for size in target_sizes:
split_targets.append(targets[offset:offset + size])
offset += size
if args.double_supervision:
split_s2s_targets = []
offset = 0
for size in s2s_target_sizes:
split_s2s_targets.append(s2s_targets[offset:offset + size])
offset += size
batch_size = inputs.size(0)
max_len = max(s2s_target_sizes)
# use CTC blank as pad token
# ctc blank has an index of zero
trg = torch.zeros(batch_size,
max_len)
assert len(s2s_target_sizes) == batch_size
for _, split_target in enumerate(split_s2s_targets):
trg[_,:s2s_target_sizes[_]] = split_target
trg = trg.long().to(device)
trg_teacher_forcing = trg[:, :-1]
trg_y = trg[:, 1:]
if args.use_attention:
batch_size = inputs.size(0)
max_len = max(target_sizes)
# use CTC blank as pad token
# ctc blank has an index of zero
trg = torch.zeros(batch_size,
max_len)
assert len(target_sizes) == batch_size
for _, split_target in enumerate(split_targets):
trg[_,:target_sizes[_]] = split_target
trg = trg.long().to(device)
trg_teacher_forcing = trg[:, :-1]
trg_y = trg[:, 1:]
if args.use_phonemes:
(logits, probs,
output_sizes,
phoneme_logits, phoneme_probs) = model(inputs, input_sizes)
elif args.denoise:
logits, probs, output_sizes, mask_logits = model(inputs, input_sizes)
elif args.use_attention:
logits, output_sizes = model(inputs,
lengths=input_sizes,
trg=trg_teacher_forcing)
# for our purposes they are the same
probs = logits
elif args.double_supervision:
ctc_logits, s2s_logits, output_sizes = model(inputs,
lengths=input_sizes,
trg=trg_teacher_forcing)
# s2s decoder is the final decoder
probs = s2s_logits
# (batch x sequence x channels) => (seqLength x batch x outputDim)
ctc_logits = ctc_logits.transpose(0, 1)
else:
logits, probs, output_sizes = model(inputs, input_sizes)
if args.double_supervision:
assert ctc_logits.is_cuda
assert s2s_logits.is_cuda
else:
assert logits.is_cuda
assert probs.is_cuda
assert output_sizes.is_cuda
decoded_output, _ = decoder.decode(probs, output_sizes,
use_attention=args.use_attention or args.double_supervision)
if args.double_supervision:
target_strings = decoder.convert_to_strings(split_s2s_targets)
else:
target_strings = decoder.convert_to_strings(split_targets)
for x in range(len(target_strings)):
transcript, reference = decoded_output[x][0], target_strings[x][0]
wer, cer, wer_ref, cer_ref = get_cer_wer(decoder, transcript, reference)
# accessing dict should be fast
times_used = train_dataset.curriculum[filenames[x]]['times_used']+1
train_dataset.update_curriculum(filenames[x],
reference, transcript,
None,
cer / cer_ref, wer / wer_ref,
times_used=times_used)
self.train_wer += wer
self.train_cer += cer
self.num_words += wer_ref
self.num_chars += cer_ref
if args.use_phonemes:
phoneme_logits = phoneme_logits.transpose(0, 1) # TxNxH
if not args.use_attention and not args.double_supervision:
logits = logits.transpose(0, 1) # TxNxH
if not args.double_supervision:
if torch.isnan(logits).any(): # and args.nan == 'zero':
# work around bad data
print("WARNING: Working around NaNs in data")
logits[torch.isnan(logits)] = 0
if args.use_phonemes:
# output_sizes should be the same
# for phoneme and non-phonemes
loss = criterion(logits,
targets,
output_sizes.cpu(),
target_sizes) + criterion(phoneme_logits,
phoneme_targets,
output_sizes.cpu(),
phoneme_target_sizes)
loss = loss / inputs.size(0) # average the loss by minibatch
loss = loss.to(device)
elif args.denoise:
ctc_loss = 0
"""
ctc_loss = criterion(logits,
targets,
output_sizes.cpu(),
target_sizes).to(device) / inputs.size(0)
"""
mask_loss = 50.0 * mask_criterion(mask_logits,
mask_targets).to(device)
if torch.isnan(mask_loss):
print('Nan loss detected')
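                    # skip the backward/optimizer step for this batch and
                    # report a large sentinel loss value to the caller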
return 102
loss = ctc_loss + mask_loss
inf = float("inf")
if args.distributed:
loss_value = reduce_tensor(loss, args.world_size).item()
else:
loss_value = loss.item() * args.gradient_accumulation_steps
ctc_loss_value = ctc_loss # .item()
if ctc_loss_value == inf or ctc_loss_value == -inf:
print("WARNING: received an inf CTC loss, setting loss value to 1000")
ctc_loss_value = 1000
loss_value = 1000
elif args.use_attention:
loss = criterion(logits.contiguous().view(-1,
logits.size(-1)),
trg_y.contiguous().view(-1))
loss = loss / sum(target_sizes) # average the loss by number of tokens
if args.gradient_accumulation_steps > 1: # average loss by accumulation steps
loss = loss / args.gradient_accumulation_steps
loss = loss.to(device)
elif args.double_supervision:
ctc_loss = ctc_criterion(ctc_logits,
targets,
output_sizes.cpu(),
target_sizes)
ctc_loss = ctc_loss / inputs.size(0) # average the loss by minibatch
ctc_loss = ctc_loss.to(device)
s2s_loss = s2s_criterion(s2s_logits.contiguous().view(-1,
s2s_logits.size(-1)),
trg_y.contiguous().view(-1))
# average the loss by number of tokens
# multiply by 10 for weight
s2s_loss = 10 * s2s_loss / sum(s2s_target_sizes)
s2s_loss = s2s_loss.to(device)
loss = ctc_loss + s2s_loss
inf = float("inf")
if args.distributed:
loss_value = reduce_tensor(loss, args.world_size).item()
else:
loss_value = loss.item() * args.gradient_accumulation_steps
ctc_loss_value = ctc_loss.item()
if ctc_loss_value == inf or ctc_loss_value == -inf:
print("WARNING: received an inf CTC loss, setting loss value to 1000")
ctc_loss_value = 1000
loss_value = 1000
else:
loss = criterion(logits, targets, output_sizes.cpu(), target_sizes)
loss = loss / inputs.size(0) # average the loss by minibatch
if args.gradient_accumulation_steps > 1: # average loss by accumulation steps
loss = loss / args.gradient_accumulation_steps
loss = loss.to(device)
if not args.denoise:
inf = float("inf")
if args.distributed:
loss_value = reduce_tensor(loss, args.world_size).item()
else:
loss_value = loss.item() * args.gradient_accumulation_steps
if loss_value == inf or loss_value == -inf:
print("WARNING: received an inf loss, setting loss value to 1000")
loss_value = 1000
loss_value = float(loss_value)
losses.update(loss_value, inputs.size(0))
if args.denoise:
mask_accuracy.update(mask_metric(mask_logits, mask_targets).item(),
inputs.size(0))
mask_losses.update(mask_loss.item(),
inputs.size(0))
ctc_losses.update(ctc_loss_value,
inputs.size(0))
elif args.double_supervision:
ctc_losses.update(ctc_loss_value,
inputs.size(0))
s2s_losses.update(s2s_loss.item(),
inputs.size(0))
# update_curriculum
if (batch_id + 1) % args.gradient_accumulation_steps == 0:
# compute gradient
optimizer.zero_grad()
loss.backward()
# try just lr reduction
# instead of gradient clipping
lr_clipping = False
            # save time by applying clipping only during
            # the first norm_warmup_epochs epochs
if args.max_norm > 0:
if epoch < args.norm_warmup_epochs:
if lr_clipping:
raise ValueError('LEGACY')
clip_coef = calc_grad_norm(model.parameters(),
args.max_norm)
underlying_lr = get_lr()
set_lr(underlying_lr * clip_coef)
else:
clip_grad_norm_(model.parameters(),
args.max_norm)
else:
raise ValueError('LEGACY')
# clip only when gradients explode
if loss_value == inf or loss_value == -inf:
clip_grad_norm_(model.parameters(),
args.max_norm)
# if torch.isnan(logits).any():
# # work around bad data
# print("WARNING: Skipping NaNs in backward step")
# SGD step
optimizer.step()
if lr_clipping:
set_lr(underlying_lr)
if args.enorm:
enorm.step()
# measure elapsed time
batch_time.update(time.time() - self.end)
if not args.silent:
if args.denoise:
print('GPU-{0} Epoch {1} [{2}/{3}]\t'
'Time {batch_time.val:.2f} ({batch_time.avg:.2f})\t'
'Data {data_time.val:.2f} ({data_time.avg:.2f})\t'
'Loss {loss.val:.2f} ({loss.avg:.2f})\t'
'CTC Loss {ctc_losses.val:.2f} ({ctc_losses.avg:.2f})\t'
'Mask Loss {mask_losses.val:.2f} ({mask_losses.avg:.2f})\t'
'Mask {mask_accuracy.val:.2f} ({mask_accuracy.avg:.2f})\t'.format(
args.gpu_rank or VISIBLE_DEVICES[0],
epoch + 1, batch_id + 1, len(train_sampler),
batch_time=batch_time, data_time=data_time, loss=losses,
mask_losses=mask_losses, ctc_losses=ctc_losses,
mask_accuracy=mask_accuracy))
elif args.double_supervision:
print('GPU-{0} Epoch {1} [{2}/{3}]\t'
'Time {batch_time.val:.2f} ({batch_time.avg:.2f})\t'
'Data {data_time.val:.2f} ({data_time.avg:.2f})\t'
'Loss {loss.val:.2f} ({loss.avg:.2f})\t'
'CTC Loss {ctc_losses.val:.2f} ({ctc_losses.avg:.2f})\t'
'S2S Loss {s2s_losses.val:.2f} ({s2s_losses.avg:.2f})\t'.format(
args.gpu_rank or VISIBLE_DEVICES[0],
epoch + 1, batch_id + 1, len(train_sampler),
batch_time=batch_time, data_time=data_time, loss=losses,
ctc_losses=ctc_losses, s2s_losses=s2s_losses))
else:
print('GPU-{0} Epoch {1} [{2}/{3}]\t'
'Time {batch_time.val:.2f} ({batch_time.avg:.2f})\t'
'Data {data_time.val:.2f} ({data_time.avg:.2f})\t'
'Loss {loss.val:.2f} ({loss.avg:.2f})\t'.format(
args.gpu_rank or VISIBLE_DEVICES[0],
epoch + 1, batch_id + 1, len(train_sampler),
batch_time=batch_time, data_time=data_time, loss=losses))
if args.double_supervision:
del inputs, targets, input_percentages, input_sizes
del probs, output_sizes, target_sizes, loss, ctc_loss, s2s_loss
del s2s_targets, s2s_target_sizes
del ctc_logits, s2s_logits
else:
del inputs, targets, input_percentages, input_sizes
del logits, probs, output_sizes, target_sizes, loss
return loss_value
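# Hedged sketch of the standard gradient-accumulation recipe that the
# --gradient-accumulation-steps flag refers to: the per-batch loss is scaled
# by the number of accumulation steps, gradients build up over several
# backward() calls, and the optimizer only steps (and zeroes gradients) once
# every N micro-batches. Model, data and step count are placeholders; this
# function is defined for illustration only and never called.
def _example_gradient_accumulation(accumulation_steps=2):
    layer = torch.nn.Linear(4, 2)
    opt = torch.optim.SGD(layer.parameters(), lr=1e-2)
    opt.zero_grad()
    for batch_id in range(4):
        loss = layer(torch.randn(8, 4)).pow(2).mean()
        # scale so the accumulated gradient matches one large batch
        (loss / accumulation_steps).backward()
        if (batch_id + 1) % accumulation_steps == 0:
            opt.step()
            opt.zero_grad()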
def init_train_set(epoch, from_iter):
#train_dataset.set_curriculum_epoch(epoch, sample=True)
train_dataset.set_curriculum_epoch(epoch,
sample=args.use_curriculum,
sample_size=args.curriculum_ratio,
cl_point=args.cl_point)
global train_loader, train_sampler
if not args.distributed:
if args.batch_similar_lens:
print('Using BucketingLenSampler')
train_sampler = BucketingLenSampler(train_dataset, batch_size=args.batch_size)
else:
train_sampler = BucketingSampler(train_dataset, batch_size=args.batch_size)
train_sampler.bins = train_sampler.bins[from_iter:]
else:
train_sampler = DistributedBucketingSampler(train_dataset,
batch_size=args.batch_size,
num_replicas=args.world_size,
rank=args.rank)
train_loader = AudioDataLoader(train_dataset,
num_workers=args.num_workers,
batch_sampler=train_sampler,
pin_memory=True)
if (not args.no_shuffle and epoch != 0) or args.no_sorta_grad:
print("Shuffling batches for the following epochs")
train_sampler.shuffle(epoch)
def train(from_epoch, from_iter, from_checkpoint):
print('Starting training with id="{}" at GPU="{}" with lr={}'.format(args.id, args.gpu_rank or VISIBLE_DEVICES[0],
get_lr()))
checkpoint_per_batch = 1+(args.checkpoint_per_samples-1) // args.batch_size if args.checkpoint_per_samples > 0 else 0
trainer = Trainer()
checkpoint = from_checkpoint
best_score = None
for epoch in range(from_epoch, args.epochs):
init_train_set(epoch, from_iter=from_iter)
trainer.reset_scores()
total_loss = 0
num_losses = 1
model.train()
trainer.end = time.time()
start_epoch_time = time.time()
for i, data in enumerate(train_loader, start=from_iter):
if i >= len(train_sampler) + start_iter:
break
total_loss += trainer.train_batch(epoch, i, data)
num_losses += 1
if (i + 1) % 50 == 0:
# deal with GPU memory fragmentation
gc.collect()
torch.cuda.empty_cache()
if checkpoint_per_batch > 0 and is_leader:
if (i + 1) % checkpoint_per_batch == 0:
file_path = '%s/checkpoint_%04d_epoch_%02d_iter_%05d.model' % (save_folder, checkpoint + 1, epoch + 1, i + 1)
print("Saving checkpoint model to %s" % file_path)
if args.use_lookahead:
_optimizer = optimizer.optimizer
else:
_optimizer = optimizer
torch.save(DeepSpeech.serialize(model, optimizer=_optimizer, epoch=epoch,
iteration=i,
loss_results=plots.loss_results,
wer_results=plots.wer_results,
cer_results=plots.cer_results,
checkpoint=checkpoint,
checkpoint_loss_results=checkpoint_plots.loss_results,
checkpoint_wer_results=checkpoint_plots.wer_results,
checkpoint_cer_results=checkpoint_plots.cer_results,
trainval_checkpoint_loss_results=trainval_checkpoint_plots.loss_results,
trainval_checkpoint_wer_results=trainval_checkpoint_plots.wer_results,
trainval_checkpoint_cer_results=trainval_checkpoint_plots.cer_results,
avg_loss=total_loss / num_losses), file_path)
train_dataset.save_curriculum(file_path + '.csv')
del _optimizer
check_model_quality(epoch, checkpoint, total_loss / num_losses, trainer.get_cer(), trainer.get_wer())
save_validation_curriculums(save_folder, checkpoint + 1, epoch + 1, i + 1)
checkpoint += 1
gc.collect()
torch.cuda.empty_cache()
model.train()
if args.checkpoint_anneal != 1:
print("Checkpoint:", checkpoint)
set_lr(get_lr() / args.checkpoint_anneal)
trainer.end = time.time()
epoch_time = time.time() - start_epoch_time
print('Training Summary Epoch: [{0}]\t'
'Time taken (s): {epoch_time:.0f}\t'
'Average Loss {loss:.3f}\t'.format(epoch + 1, epoch_time=epoch_time, loss=total_loss / num_losses))
from_iter = 0 # Reset start iteration for next epoch
if trainer.num_chars == 0:
continue
wer_avg, cer_avg = check_model_quality(epoch, checkpoint, total_loss / num_losses, trainer.get_cer(), trainer.get_wer())
new_score = wer_avg + cer_avg
checkpoint += 1
if args.checkpoint and is_leader: # checkpoint after the end of each epoch
file_path = '%s/model_checkpoint_%04d_epoch_%02d.model' % (save_folder, checkpoint+1, epoch + 1)
if args.use_lookahead:
_optimizer = optimizer.optimizer
else:
_optimizer = optimizer
torch.save(DeepSpeech.serialize(model,
optimizer=_optimizer,
epoch=epoch,
loss_results=plots.loss_results,
wer_results=plots.wer_results,
cer_results=plots.cer_results,
checkpoint=checkpoint,
checkpoint_loss_results=checkpoint_plots.loss_results,
checkpoint_wer_results=checkpoint_plots.wer_results,
checkpoint_cer_results=checkpoint_plots.cer_results,
trainval_checkpoint_loss_results=trainval_checkpoint_plots.loss_results,
trainval_checkpoint_wer_results=trainval_checkpoint_plots.wer_results,
trainval_checkpoint_cer_results=trainval_checkpoint_plots.cer_results,
), file_path)
train_dataset.save_curriculum(file_path + '.csv')
save_validation_curriculums(save_folder, checkpoint + 1, epoch + 1, 0)
del _optimizer
# anneal lr
print("Checkpoint:", checkpoint)
set_lr(get_lr() / args.learning_anneal)
if (best_score is None or new_score < best_score) and is_leader:
print("Found better validated model, saving to %s" % args.model_path)
if args.use_lookahead:
_optimizer = optimizer.optimizer
else:
_optimizer = optimizer
torch.save(DeepSpeech.serialize(model,
optimizer=_optimizer,
epoch=epoch,
loss_results=plots.loss_results,
wer_results=plots.wer_results,
cer_results=plots.cer_results,
checkpoint=checkpoint,
checkpoint_loss_results=checkpoint_plots.loss_results,
checkpoint_wer_results=checkpoint_plots.wer_results,
checkpoint_cer_results=checkpoint_plots.cer_results,
trainval_checkpoint_loss_results=trainval_checkpoint_plots.loss_results,
trainval_checkpoint_wer_results=trainval_checkpoint_plots.wer_results,
trainval_checkpoint_cer_results=trainval_checkpoint_plots.cer_results,
),
args.model_path)
train_dataset.save_curriculum(args.model_path + '.csv')
del _optimizer
best_score = new_score
if __name__ == '__main__':
args = parser.parse_args()
assert args.use_phonemes + args.denoise + args.grapheme_phoneme < 2
assert args.double_supervision + args.use_attention < 2
    # ugh, I'm an idiot, somebody kill me
if args.use_phonemes:
from data.data_loader_aug import AudioDataLoaderPhoneme as AudioDataLoader
elif args.denoise:
from data.data_loader_aug import AudioDataLoaderDenoise as AudioDataLoader
elif args.double_supervision:
from data.data_loader_aug import AudioDataLoaderDouble as AudioDataLoader
else:
from data.data_loader_aug import AudioDataLoader
if args.double_supervision:
from data.data_loader_aug import AudioDataLoader as AudioDataLoaderVal
else:
AudioDataLoaderVal = AudioDataLoader
args.distributed = args.world_size > 1
args.model_path = os.path.join(args.save_folder, 'best.model')
is_leader = True
device = torch.device("cuda" if args.cuda else "cpu")
if args.distributed:
if args.gpu_rank:
torch.cuda.set_device(int(args.gpu_rank))
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
is_leader = args.rank == 0 # Only the first proc should save models
save_folder = args.save_folder
os.makedirs(save_folder, exist_ok=True)
plots = PlotWindow(args.id, 'train_loss_epochs', log_y=True)
checkpoint_plots = PlotWindow(args.id, 'test_loss_checks', log_y=True)
if args.train_val_manifest != '':
trainval_checkpoint_plots = PlotWindow(args.id, 'val_loss_checks', log_y=True)
else:
# set all properties to None for easy backwards compatibility
        trainval_checkpoint_plots = type('test', (object,), {})()
trainval_checkpoint_plots.loss_results = None
trainval_checkpoint_plots.wer_results = None
trainval_checkpoint_plots.cer_results = None
lr_plots = LRPlotWindow(args.id, 'lr_finder', log_x=True)
total_avg_loss, start_epoch, start_iter, start_checkpoint = 0, 0, 0, 0
if args.use_phonemes:
with open(args.phonemes_path) as phoneme_file:
phoneme_map = {l: i for i, l
in enumerate(json.load(phoneme_file))}
if args.continue_from: # Starting from previous model
print("Loading checkpoint model %s" % args.continue_from)
package = torch.load(args.continue_from, map_location=lambda storage, loc: storage)
# package['dropout']=0.2
model = DeepSpeech.load_model_package(package)
# start with non-phoneme model, continue with phonemes
labels = DeepSpeech.get_labels(model)
audio_conf = DeepSpeech.get_audio_conf(model)
# in case you need to resume and change audio conf manually
"""
audio_conf = dict(sample_rate=args.sample_rate,
window_size=args.window_size,
window_stride=args.window_stride,
window=args.window,
noise_dir=args.noise_dir,
noise_prob=args.noise_prob,
noise_levels=(args.noise_min, args.noise_max),
aug_prob_8khz=args.aug_prob_8khz,
aug_prob_spect=args.aug_prob_spect)
if args.use_phonemes:
audio_conf['phoneme_count'] = len(phoneme_map)
audio_conf['phoneme_map'] = phoneme_map
"""
if args.use_phonemes and package.get('phoneme_count', 0) == 0:
model = DeepSpeech.add_phonemes_to_model(model,
len(phoneme_map))
audio_conf['phoneme_count'] = len(phoneme_map)
audio_conf['phoneme_map'] = phoneme_map
model.phoneme_count = len(phoneme_map)
if args.denoise and package.get('denoise', False) == False:
model = DeepSpeech.add_denoising_to_model(model)
print('Model transformed to a denoising one')
audio_conf['denoise'] = True
audio_conf['noise_prob'] = args.noise_prob
audio_conf['aug_type'] = args.aug_type
audio_conf['pytorch_stft'] = True
print('Changed audio conf params')
if args.use_attention:
if args.use_bpe:
from data.bpe_labels import Labels as BPELabels
labels = BPELabels(sp_model=args.sp_model,
use_phonemes=False,
s2s_decoder=args.use_attention)
# list instead of string
labels = labels.label_list
model = DeepSpeech.add_s2s_decoder_to_model(model,
labels=labels)
print('Model transformed to a model with full s2s decoder')
# REMOVE LATER
# audio_conf['noise_dir'] = '../data/augs/*.wav'
# audio_conf['noise_prob'] = 0.1
if args.double_supervision or 'transformer' in args.rnn_type:
optimizer = build_optimizer(args,
model=model)
else:
parameters = model.parameters()
optimizer = build_optimizer(args,
parameters_=parameters)
if not args.finetune: # Don't want to restart training
model = model.to(device)
# when adding phonemes, optimizer state is not full
try:
optimizer.load_state_dict(package['optim_dict'])
# set_lr(args.lr)
print('Current LR {}'.format(
optimizer.state_dict()['param_groups'][0]['lr']
))
except:
if args.double_supervision or 'transformer' in args.rnn_type or args.grapheme_phoneme:
optim_state = package['optim_dict'][0]
lr = optim_state['param_groups'][0]['lr']
print('Just setting the SGD LR {}'.format(lr))
set_lr(lr)
else:
print('Just changing the LR in the optimizer')
# set_lr(package['optim_dict']['param_groups'][0]['lr'])
set_lr(args.lr)
start_epoch = int(package.get('epoch', 1)) - 1 # Index start at 0 for training
start_iter = package.get('iteration', None)
start_checkpoint = package.get('checkpoint', 0) or 0
if start_iter is None:
start_epoch += 1 # We saved model after epoch finished, start at the next epoch.
start_iter = 0
else:
start_iter += 1
total_avg_loss = int(package.get('avg_loss', 0))
plots.loss_results = package['loss_results']
plots.cer_results = package['cer_results']
plots.wer_results = package['wer_results']
if package.get('checkpoint_cer_results') is not None:
checkpoint_plots.loss_results = package.get('checkpoint_loss_results', torch.Tensor(10000))
checkpoint_plots.cer_results = package.get('checkpoint_cer_results', torch.Tensor(10000))
checkpoint_plots.wer_results = package.get('checkpoint_wer_results', torch.Tensor(10000))
if package['cer_results'] is not None and start_epoch > 0:
plots.plot_history(start_epoch)
if package.get('checkpoint_cer_results') is not None and start_checkpoint > 0:
checkpoint_plots.plot_history(start_checkpoint)
if args.train_val_manifest != '':
if package.get('trainval_checkpoint_cer_results') is not None:
trainval_checkpoint_plots.loss_results = package.get('trainval_checkpoint_loss_results', torch.Tensor(10000))
trainval_checkpoint_plots.cer_results = package.get('trainval_checkpoint_cer_results', torch.Tensor(10000))
trainval_checkpoint_plots.wer_results = package.get('trainval_checkpoint_wer_results', torch.Tensor(10000))
if package.get('trainval_checkpoint_cer_results') is not None and start_checkpoint > 0:
trainval_checkpoint_plots.plot_history(start_checkpoint)
else:
if args.use_bpe:
from data.bpe_labels import Labels as BPELabels
labels = BPELabels(sp_model=args.sp_model,
use_phonemes=args.phonemes_only,
s2s_decoder=args.use_attention or args.double_supervision,
double_supervision=False,
naive_split=args.naive_split,
omit_spaces=args.omit_spaces,
subword_regularization=args.subword_regularization)
# list instead of string
labels = labels.label_list
# in case of double supervision just use the longer
# i.e. s2s = blank(pad) + base_num + space + eos + sos
# ctc = blank(pad) + base_num + space + 2
# len(ctc) = len(s2s) - 1
else:
with open(args.labels_path) as label_file:
# labels is a string
labels = str(''.join(json.load(label_file)))
assert args.pytorch_stft != args.pytorch_mel
audio_conf = dict(sample_rate=args.sample_rate,
window_size=args.window_size,
window_stride=args.window_stride,
window=args.window,
noise_dir=args.noise_dir,
noise_prob=args.noise_prob,
noise_levels=(args.noise_min, args.noise_max),
aug_prob_8khz=args.aug_prob_8khz,
aug_prob_spect=args.aug_prob_spect,
use_bpe=args.use_bpe,
sp_model=args.sp_model,
aug_type=args.aug_type,
pytorch_mel=args.pytorch_mel,
pytorch_stft=args.pytorch_stft,
denoise=args.denoise)
if args.use_phonemes:
audio_conf['phoneme_count'] = len(phoneme_map)
audio_conf['phoneme_map'] = phoneme_map
rnn_type = args.rnn_type.lower()
        assert rnn_type in supported_rnns, "rnn_type should be one of the supported rnn types"
model = DeepSpeech(rnn_hidden_size=args.hidden_size,
cnn_width=args.cnn_width,
nb_layers=args.hidden_layers,
labels=labels,
rnn_type=rnn_type,
audio_conf=audio_conf,
bidirectional=args.bidirectional,
bnm=args.batch_norm_momentum,
dropout=args.dropout,
phoneme_count=len(phoneme_map) if args.use_phonemes else 0,
decoder_layers=args.decoder_layers,
kernel_size=args.kernel_size,
decoder_girth=args.decoder_girth)
if args.use_lookahead:
model = model.to(device)
if args.double_supervision or 'transformer' in args.rnn_type:
optimizer = build_optimizer(args,
model=model)
else:
parameters = model.parameters()
optimizer = build_optimizer(args,
parameters_=parameters)
# enorm = ENorm(model.named_parameters(), optimizer, c=1)
if args.use_attention:
criterion = torch.nn.NLLLoss(reduction='sum',
ignore_index=0) # use ctc blank token as pad token
elif args.double_supervision:
ctc_criterion = CTCLoss()
s2s_criterion = torch.nn.NLLLoss(reduction='sum',
ignore_index=0) # use ctc blank token as pad token
else:
criterion = CTCLoss()
if args.denoise:
mask_criterion = SemsegLoss(bce_weight=1.0,
dice_weight=0.0,
mse_weight=0.0)
mask_metric = MaskSimilarity(thresholds=[0.05, 0.1, 0.15])
# if double supervision used, s2s head is the last one
# and actually partakes in the decoding
decoder = GreedyDecoder(labels,
cut_after_eos_token=args.use_attention or args.double_supervision,
eos_token=']')
print('Label length {}'.format(len(labels)))
print(labels)
print('Audio conf')
print(audio_conf)
train_dataset = SpectrogramDataset(audio_conf=audio_conf, cache_path=args.cache_dir,
manifest_filepath=args.train_manifest,
labels=labels, normalize=args.norm, augment=args.augment,
curriculum_filepath=args.curriculum,
use_attention=args.use_attention,
double_supervision=args.double_supervision,
naive_split=args.naive_split,
phonemes_only=args.phonemes_only,
omit_spaces=args.omit_spaces,
subword_regularization=args.subword_regularization)
test_audio_conf = {**audio_conf,
'noise_prob': 0,
'aug_prob_8khz':0,
'aug_prob_spect':0,
'phoneme_count':0,
'phoneme_map':None}
print('Test audio conf')
print(test_audio_conf)
# no augs on test
# on test, even in case of double supervision
# we just need s2s data to validate
test_dataset = SpectrogramDataset(audio_conf=test_audio_conf,
cache_path=args.cache_dir,
manifest_filepath=args.val_manifest,
labels=labels, normalize=args.norm, augment=False,
use_attention=args.use_attention or args.double_supervision,
double_supervision=False,
naive_split=args.naive_split,
phonemes_only=args.phonemes_only,
omit_spaces=args.omit_spaces,
subword_regularization=False) # turn off augs on val
# if file is specified
# separate train validation wo domain shift
# also wo augs
# on test, even in case of double supervision
# we just need s2s data to validate
if args.train_val_manifest != '':
trainval_dataset = SpectrogramDataset(audio_conf=test_audio_conf,
cache_path=args.cache_dir,
manifest_filepath=args.train_val_manifest,
labels=labels, normalize=args.norm, augment=False,
use_attention=args.use_attention or args.double_supervision,
double_supervision=False,
naive_split=args.naive_split,
phonemes_only=args.phonemes_only,
omit_spaces=args.omit_spaces,
subword_regularization=False) # turn off augs on val
if args.reverse_sort:
# XXX: A hack to test max memory load.
train_dataset.ids.reverse()
test_loader = AudioDataLoaderVal(test_dataset,
batch_size=args.val_batch_size,
num_workers=args.num_workers)
if args.train_val_manifest != '':
trainval_loader = AudioDataLoaderVal(trainval_dataset,
batch_size=args.val_batch_size,
num_workers=args.num_workers)
if not args.use_lookahead:
model = model.to(device)
if args.distributed:
device_id = [int(args.gpu_rank)] if args.rank else None
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=device_id)
elif args.data_parallel:
model = torch.nn.DataParallel(model).to(device)
print('Using DP')
print(model)
print("Number of parameters: %d" % DeepSpeech.get_param_size(model))
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
if args.denoise:
mask_accuracy = AverageMeter()
mask_losses = AverageMeter()
ctc_losses = AverageMeter()
if args.double_supervision:
ctc_losses = AverageMeter()
s2s_losses = AverageMeter()
train(start_epoch, start_iter, start_checkpoint)
| [
"torch.nn.NLLLoss",
"torch.device",
"torch.zeros",
"torch.cuda.manual_seed_all",
"torch.stack",
"torch.arange",
"torch.cuda.synchronize",
"torch.distributed.init_process_group",
"torch.isnan",
"torch.no_grad",
"torch.optim.SGD",
"torch.optim.Adam",
"torch.nn.parallel.DistributedDataParallel",
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.load",
"torch.Tensor",
"torch.nn.DataParallel"
] | 1.0 | snakers4/deepspeech.pytorch | 4c9e9648fe56247169d73248340f3a60572d3f5b |
1.0 | # -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
import os
import time
import pickle
import numpy as np
import torch
import torch.nn as nn
DEFAULT_DTYPE = torch.float32
def create_prior(prior_type, **kwargs):
if prior_type == 'gmm':
prior = MaxMixturePrior(**kwargs)
elif prior_type == 'l2':
return L2Prior(**kwargs)
elif prior_type == 'angle':
return SMPLifyAnglePrior(**kwargs)
elif prior_type == 'none' or prior_type is None:
# Don't use any pose prior
def no_prior(*args, **kwargs):
return 0.0
prior = no_prior
else:
raise ValueError('Prior {}'.format(prior_type) + ' is not implemented')
return prior
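# Minimal, hedged usage sketch of the create_prior factory above. It only
# exercises the 'l2', 'angle' and 'none' branches, since 'gmm' additionally
# needs a pickled mixture model on disk. Defined for illustration, never called.
def _example_create_prior():
    body_pose = torch.zeros([1, 69], dtype=DEFAULT_DTYPE)  # 23 joints * 3, no global orient
    l2 = create_prior('l2')
    angle = create_prior('angle')
    none = create_prior('none')
    return l2(body_pose), angle(body_pose), none(body_pose)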
class SMPLifyAnglePrior(nn.Module):
def __init__(self, dtype=torch.float32, **kwargs):
super(SMPLifyAnglePrior, self).__init__()
        # Indices for the rotation angle of
# 55: left elbow, 90deg bend at -np.pi/2
# 58: right elbow, 90deg bend at np.pi/2
# 12: left knee, 90deg bend at np.pi/2
# 15: right knee, 90deg bend at np.pi/2
angle_prior_idxs = np.array([55, 58, 12, 15], dtype=np.int64)
angle_prior_idxs = torch.tensor(angle_prior_idxs, dtype=torch.long)
self.register_buffer('angle_prior_idxs', angle_prior_idxs)
angle_prior_signs = np.array([1, -1, -1, -1],
dtype=np.float32 if dtype == torch.float32
else np.float64)
angle_prior_signs = torch.tensor(angle_prior_signs,
dtype=dtype)
self.register_buffer('angle_prior_signs', angle_prior_signs)
def forward(self, pose, with_global_pose=False):
''' Returns the angle prior loss for the given pose
Args:
pose: (Bx[23 + 1] * 3) torch tensor with the axis-angle
representation of the rotations of the joints of the SMPL model.
Kwargs:
with_global_pose: Whether the pose vector also contains the global
orientation of the SMPL model. If not then the indices must be
corrected.
Returns:
                A size (B) tensor containing the angle prior loss for each element
in the batch.
'''
angle_prior_idxs = self.angle_prior_idxs - (not with_global_pose) * 3
return torch.exp(pose[:, angle_prior_idxs] *
self.angle_prior_signs).pow(2)
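# Illustrative sketch (not part of the original module): calling SMPLifyAnglePrior
# on a batch of axis-angle body poses. The pose dimensionality and the absence of
# the global orientation are assumptions made for this example only.
def _example_angle_prior():
    prior = SMPLifyAnglePrior(dtype=torch.float32)
    pose = torch.zeros(2, 69)  # assumed: body pose without the 3 global-orientation dims
    loss = prior(pose, with_global_pose=False)
    return loss  # shape (2, 4): one penalty term per hinged joint (elbows and knees)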
class L2Prior(nn.Module):
def __init__(self, dtype=DEFAULT_DTYPE, reduction='sum', **kwargs):
super(L2Prior, self).__init__()
def forward(self, module_input, *args):
return torch.sum(module_input.pow(2))
class MaxMixturePrior(nn.Module):
def __init__(self, prior_folder='prior',
num_gaussians=6, dtype=DEFAULT_DTYPE, epsilon=1e-16,
use_merged=True,
**kwargs):
super(MaxMixturePrior, self).__init__()
if dtype == DEFAULT_DTYPE:
np_dtype = np.float32
elif dtype == torch.float64:
np_dtype = np.float64
else:
print('Unknown float type {}, exiting!'.format(dtype))
sys.exit(-1)
self.num_gaussians = num_gaussians
self.epsilon = epsilon
self.use_merged = use_merged
gmm_fn = 'gmm_{:02d}.pkl'.format(num_gaussians)
full_gmm_fn = os.path.join(prior_folder, gmm_fn)
if not os.path.exists(full_gmm_fn):
print('The path to the mixture prior "{}"'.format(full_gmm_fn) +
' does not exist, exiting!')
sys.exit(-1)
with open(full_gmm_fn, 'rb') as f:
gmm = pickle.load(f, encoding='latin1')
if type(gmm) == dict:
means = gmm['means'].astype(np_dtype)
covs = gmm['covars'].astype(np_dtype)
weights = gmm['weights'].astype(np_dtype)
elif 'sklearn.mixture.gmm.GMM' in str(type(gmm)):
means = gmm.means_.astype(np_dtype)
covs = gmm.covars_.astype(np_dtype)
weights = gmm.weights_.astype(np_dtype)
else:
print('Unknown type for the prior: {}, exiting!'.format(type(gmm)))
sys.exit(-1)
self.register_buffer('means', torch.tensor(means, dtype=dtype))
self.register_buffer('covs', torch.tensor(covs, dtype=dtype))
precisions = [np.linalg.inv(cov) for cov in covs]
precisions = np.stack(precisions).astype(np_dtype)
self.register_buffer('precisions',
torch.tensor(precisions, dtype=dtype))
# The constant term:
sqrdets = np.array([(np.sqrt(np.linalg.det(c)))
for c in gmm['covars']])
const = (2 * np.pi)**(69 / 2.)
nll_weights = np.asarray(gmm['weights'] / (const *
(sqrdets / sqrdets.min())))
nll_weights = torch.tensor(nll_weights, dtype=dtype).unsqueeze(dim=0)
self.register_buffer('nll_weights', nll_weights)
weights = torch.tensor(gmm['weights'], dtype=dtype).unsqueeze(dim=0)
self.register_buffer('weights', weights)
self.register_buffer('pi_term',
torch.log(torch.tensor(2 * np.pi, dtype=dtype)))
cov_dets = [np.log(np.linalg.det(cov.astype(np_dtype)) + epsilon)
for cov in covs]
self.register_buffer('cov_dets',
torch.tensor(cov_dets, dtype=dtype))
# The dimensionality of the random variable
self.random_var_dim = self.means.shape[1]
def get_mean(self):
''' Returns the mean of the mixture '''
mean_pose = torch.matmul(self.weights, self.means)
return mean_pose
def merged_log_likelihood(self, pose, betas):
diff_from_mean = pose.unsqueeze(dim=1) - self.means
prec_diff_prod = torch.einsum('mij,bmj->bmi',
[self.precisions, diff_from_mean])
diff_prec_quadratic = (prec_diff_prod * diff_from_mean).sum(dim=-1)
curr_loglikelihood = 0.5 * diff_prec_quadratic - \
torch.log(self.nll_weights)
# curr_loglikelihood = 0.5 * (self.cov_dets.unsqueeze(dim=0) +
# self.random_var_dim * self.pi_term +
# diff_prec_quadratic
# ) - torch.log(self.weights)
min_likelihood, _ = torch.min(curr_loglikelihood, dim=1)
return min_likelihood
def log_likelihood(self, pose, betas, *args, **kwargs):
''' Create graph operation for negative log-likelihood calculation
'''
likelihoods = []
for idx in range(self.num_gaussians):
mean = self.means[idx]
prec = self.precisions[idx]
cov = self.covs[idx]
diff_from_mean = pose - mean
curr_loglikelihood = torch.einsum('bj,ji->bi',
[diff_from_mean, prec])
curr_loglikelihood = torch.einsum('bi,bi->b',
[curr_loglikelihood,
diff_from_mean])
cov_term = torch.log(torch.det(cov) + self.epsilon)
curr_loglikelihood += 0.5 * (cov_term +
self.random_var_dim *
self.pi_term)
likelihoods.append(curr_loglikelihood)
log_likelihoods = torch.stack(likelihoods, dim=1)
min_idx = torch.argmin(log_likelihoods, dim=1)
weight_component = self.nll_weights[:, min_idx]
weight_component = -torch.log(weight_component)
return weight_component + log_likelihoods[:, min_idx]
def forward(self, pose, betas):
if self.use_merged:
return self.merged_log_likelihood(pose, betas)
else:
return self.log_likelihood(pose, betas)
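# Illustrative sketch (not part of the original module): building priors through the
# create_prior factory. The GMM variant requires a pickled mixture file on disk, so
# this example only exercises the file-free priors; tensor shapes are assumptions.
def _example_create_priors():
    l2_prior = create_prior('l2')
    angle_prior = create_prior('angle', dtype=torch.float32)
    body_pose = torch.zeros(1, 69)
    l2_term = l2_prior(body_pose)              # scalar: sum of squared pose parameters
    angle_term = angle_prior(body_pose).sum()  # penalises unnatural elbow/knee bends
    return l2_term + angle_term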
| [
"torch.stack",
"torch.min",
"torch.einsum",
"torch.argmin",
"torch.det",
"torch.tensor",
"torch.log",
"torch.matmul",
"torch.exp"
] | 1.0.1 | omidrk/computervisionPanopticToSMPLAuto | b84b60f0ec4ffdb4ae61348919a95f7bb2eab926 |
1.5 | from itertools import product
import numpy as np
import pandas as pd
import torch
from trains import Task
from models.detection.SSD.priorbox_optimization import PriorOptimizationInput, ImageSizeTuple
from models.detection.SSD.priorbox_optimization.bbox_clustering import get_box_pairwise_iou
def collect_ground_truth_stats(ground_truth_loader):
def just_meta_iter(loader):
for gt in loader:
yield gt[-1]
gt = list(just_meta_iter(ground_truth_loader))
gt_df = get_gt_df_from_gt(gt)
return gt_df
def get_gt_df_from_gt(gt):
# removing all "crowd" labels
def process_meta_element(element):
boxes = element['boxes']
iscrowd = element['iscrowd']
labels = element['labels']
orig_boxes = [box for box, crowd in zip(boxes, iscrowd) if not crowd]
orig_labels = [label for label, crowd in zip(labels, iscrowd) if not crowd]
orig_boxes = np.around(orig_boxes)
width = np.around(orig_boxes[:, 2] - orig_boxes[:, 0])
height = np.around(orig_boxes[:, 3] - orig_boxes[:, 1])
area = width * height
good_boxes = np.where(area > 0)[0]
if len(good_boxes) != len(orig_boxes):
boxes = orig_boxes[good_boxes]
labels = np.array(orig_labels)[good_boxes].tolist()
height = height[good_boxes]
width = width[good_boxes]
else:
boxes = orig_boxes
labels = orig_labels
pairwise_iou = get_box_pairwise_iou(boxes)
score = np.around(pairwise_iou.sum(axis=0) - 1, decimals=2)
return [(w, h, label, q) for w, h, label, q in zip(width, height, labels, score)]
processed_gt = [process_meta_element(el) for elem in gt for el in elem if len(el['boxes']) > 0]
all_gt = [elem for elements in processed_gt for elem in elements]
column_names = ['width', 'height', 'label', 'overlap_score']
return pd.DataFrame(all_gt, columns=column_names)
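# Illustrative sketch (not part of the original module): the per-image metadata
# format that get_gt_df_from_gt expects. The keys and values are assumptions that
# mirror the fields read by process_meta_element above.
def _example_gt_dataframe():
    gt = [[{
        'boxes': np.array([[10., 20., 110., 220.], [0., 0., 5., 5.]]),
        'labels': [1, 2],
        'iscrowd': [0, 0],
    }]]
    return get_gt_df_from_gt(gt)  # DataFrame with width/height/label/overlap_score columns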
def get_optimization_input(ground_truth_df, fmap_sizes, input_priors, image_size):
def fmap_to_pixel_fov(fmap_sizes):
# fm = [np.array([fmap, fmap]) for fmap in fmap_sizes]
# fm_np = np.vstack(fm)
# fm_in_pixels = np.array(image_size) / fm_np
fm_in_pixels = np.array(image_size) * \
np.array([3/fmap_sizes[-7], 3/fmap_sizes[-6], 3/(fmap_sizes[-5]+2), 3/(fmap_sizes[-4]+2),
3/(fmap_sizes[-3]+2), 3/(fmap_sizes[-2]+2), 1])
fm_in_pixels = [np.array([fmap, fmap]) for fmap in fm_in_pixels]
fm_in_pixels = np.vstack(fm_in_pixels)
return pd.DataFrame(fm_in_pixels, columns=['width', 'height'])
task = Task.current_task()
fmap = [np.array([fmap, fmap]) for fmap in fmap_sizes]
task.upload_artifact('feature_maps_sizes', pd.DataFrame(np.vstack(fmap), columns=['width', 'height']))
fmap_df = fmap_to_pixel_fov(fmap_sizes)
task.upload_artifact('feature_maps_pixel_fov', fmap_df)
in_priors_df = pd.DataFrame(input_priors.numpy(), columns=['match_group', 'width', 'height'])
target_image_size = ImageSizeTuple(w=image_size, h=image_size)
return PriorOptimizationInput(
target_image_size=target_image_size,
gt_bbox=ground_truth_df,
fmap_sizes=fmap_df,
in_priors=in_priors_df,
)
def convert_optimization_result_to_priors(fm_sizes, steps, opt_result):
priors_output = opt_result.out_priors
by_resolution = list(priors_output.groupby('match_group'))
num_anchors_per_resolution = [len(priors[-1]) for priors in by_resolution]
if len(num_anchors_per_resolution) < len(fm_sizes):
        print('Some resolutions were empty - setting a default prior for each empty resolution')
curr_match_groups = opt_result.out_priors.match_group.to_list()
curr_prior_number = len(curr_match_groups)
empty_match_groups = list(set(range(len(fm_sizes))) - set(np.unique(curr_match_groups)))
for empty_match_group in empty_match_groups:
prior_size = opt_result.target_image_size.w / fm_sizes[empty_match_group]
new_prior = pd.DataFrame(np.array([empty_match_group, prior_size**2, 1, prior_size, prior_size]).reshape(1, 5),
columns=['match_group', 'area', 'aspect_ratio', 'width', 'height'])
new_prior['index'] = 'prior_{}'.format(curr_prior_number)
new_prior = new_prior.set_index('index')
priors_output = priors_output.append(new_prior)
curr_prior_number += 1
by_resolution.append((empty_match_group, new_prior))
num_anchors_per_resolution.append(1)
Task.current_task().register_artifact('priors_output', priors_output.sort_values('match_group'))
by_resolution = list(priors_output.groupby('match_group'))
boxes = []
priors = []
for i, (fm_size, new_priors) in enumerate(zip(fm_sizes, by_resolution)):
for h, w in product(range(fm_size), repeat=2):
cx = (w + 0.5) * steps[i]
cy = (h + 0.5) * steps[i]
for prior in new_priors[-1].iterrows():
w = prior[-1].width
h = prior[-1].height
boxes.append((cx, cy, w, h))
priors.append((i, w, h))
return torch.Tensor(boxes), torch.Tensor(np.unique(np.array(priors), axis=0)), num_anchors_per_resolution
| [
"torch.Tensor"
] | 1.5.1 | danmalowany/trains-model-zoo | 2091100057afae9593b18ddcefd81b7d46724a96 |
1.9 | import json
import os
import subprocess
import sys
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path
from time import sleep
from typing import List, Union
import numpy as np
import torch
import torch.nn as nn
def create_logdir(root: Union[str, Path] = None):
if (root is None) or (root == ""):
root = Path.cwd()
else:
root = Path(root)
# When running multiple jobs in parallel (e.g. Slurm) we could get the same
# timestamp so let's allow ourselves to try a few times
for _ in range(10):
try:
timestamp = datetime.now().strftime("%Y-%m-%d-%A-%H-%M-%S")
log_dir = root / "runs" / timestamp
log_dir.mkdir(parents=True)
except FileExistsError:
sleep(1)
continue
else:
break
else:
raise SystemExit("Could not create logdir.")
return log_dir
def save_repo_status(path: Union[str, Path]):
path = Path(path)
with (path / "git_commit.txt").open("w") as f:
subprocess.run(["git", "rev-parse", "HEAD"], stdout=f)
with (path / "workspace_changes.diff").open("w") as f:
subprocess.run(["git", "diff"], stdout=f)
def save_command_line(path: Union[str, Path]):
path = Path(path)
with open(path / "command_line.txt", "w") as f:
f.write("python " + " ".join(sys.argv))
def set_seed(seed: int, allow_nondeterminism: bool):
torch.manual_seed(seed)
np.random.seed(seed)
if allow_nondeterminism is False:
# This can make the training slower
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def unconcatenate(x: torch.Tensor, orig_list: List[torch.Tensor]):
result = []
processed = 0
for ref in orig_list:
result.append(x[processed : processed + ref.numel()].reshape(ref.shape))
processed += ref.numel()
return result
def save_checkpoint(
logdir,
model: torch.nn.Module,
optimiser: torch.optim.Optimizer,
lr_scheduler: torch.optim.lr_scheduler._LRScheduler,
epoch: int,
max_checkpoints=None,
):
state = {
"model": model.state_dict(),
"optimiser": optimiser.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
}
p = logdir / f"chkpt_epoch_{epoch}.pt"
torch.save(state, p)
if max_checkpoints:
chkpts = sorted(logdir.glob("chkpt_e[0-9]*.pt"), key=os.path.getmtime)
num_unwanted_chckpts = len(chkpts) - max_checkpoints
if num_unwanted_chckpts > 0:
for c in chkpts[0:num_unwanted_chckpts]:
c.unlink()
def load_checkpoint(
path: Union[Path, str],
model: torch.nn.Module,
optimiser: torch.optim.Optimizer,
lr_scheduler: torch.optim.lr_scheduler._LRScheduler,
):
path = Path(path)
if not path.exists():
raise FileNotFoundError
print(f"🛻 Loading from checkpoint file {path}.")
chkpt = torch.load(path)
model.load_state_dict(chkpt["model"])
print("✅ Loaded the model.")
optimiser.load_state_dict(chkpt["optimiser"])
print("✅ Loaded the optimiser.")
lr_scheduler.load_state_dict(chkpt["lr_scheduler"])
print("✅ Loaded the LR scheduler.")
@contextmanager
def eval_mode(model: nn.Module):
"""
Sets training mode to False and restores it when exiting.
"""
is_training = model.training
try:
model.eval()
yield model
finally:
if is_training:
model.train()
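# Illustrative sketch (not part of the original module): eval_mode temporarily puts a
# model into eval() and restores its previous training flag on exit. The tiny model
# below is an assumption used only for demonstration.
def _example_eval_mode():
    model = nn.Linear(4, 2)
    model.train()
    with eval_mode(model), torch.no_grad():
        _ = model(torch.zeros(1, 4))
    assert model.training  # training mode restored after the context exits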
class Hyperparameters:
def __init__(self, **kwargs):
self.from_dict(kwargs)
def from_argparse(self, args):
self.from_dict(args.__dict__)
def from_dict(self, d):
for k, v in d.items():
setattr(self, k, v)
def as_dict(self):
return {k: getattr(self, k) for k in self.__dict__}
def from_json(self, j):
d = json.loads(j)
return self.from_dict(d)
def to_json(self, path: Path):
j = json.dumps(self.as_dict(), indent=4, sort_keys=True)
path.write_text(j)
def __contains__(self, k):
return k in self.__dict__
def __str__(self):
s = [f"{k}={v}" for k, v in self.as_dict().items()]
return ",".join(s)
| [
"torch.manual_seed",
"torch.save",
"torch.load"
] | 1.9.1 | mil-ad/prospr | a92177989f4480f1f2b43a48b3e18a6597ebba6d |
1.9 | import torch.nn as nn
from ..core.primitives import AbstractPrimitive
class ConvBNReLU(AbstractPrimitive):
def __init__(self, C_in, C_out, kernel_size, stride=1, affine=False):
super().__init__(locals())
pad = 0 if stride == 1 and kernel_size == 1 else 1
self.op = nn.Sequential(
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=pad, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
nn.ReLU(inplace=False),
)
def forward(self, x, edge_data):
return self.op(x)
def get_embedded_ops(self):
return None
class DepthwiseConv(AbstractPrimitive):
"""
Depthwise convolution
"""
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
super().__init__(locals())
self.op = nn.Sequential(
nn.Conv2d(
C_in,
C_in,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=C_in,
bias=False,
),
nn.BatchNorm2d(C_in, affine=affine),
nn.ReLU(inplace=False),
)
def forward(self, x, edge_data):
return self.op(x)
def get_embedded_ops(self):
return None
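# Illustrative sketch (not part of the original module): a quick shape check of the two
# primitives above. Input sizes are assumptions; edge_data is unused by both forward()
# implementations, so None is passed.
def _example_primitive_shapes():
    import torch
    x = torch.randn(2, 16, 32, 32)
    conv_bn_relu = ConvBNReLU(C_in=16, C_out=32, kernel_size=3)
    depthwise = DepthwiseConv(C_in=16, C_out=16, kernel_size=3, stride=1, padding=1)
    return conv_bn_relu(x, None).shape, depthwise(x, None).shape  # (2, 32, 32, 32), (2, 16, 32, 32)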
| [
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
] | 1.9.0 | NUDTNASLab/NASLib | 451cdb4738a7c1501ac62f78727c6244039dc657 |
1.0 | from gym import spaces
import multiprocessing.dummy as mp
import multiprocessing
import numpy as np
import os
import torch
import torch.nn as nn
from torch.nn import Parameter, ModuleList
import torch.nn.functional as F
from evkit.rl.utils import init, init_normc_
from evkit.utils.misc import is_cuda
from evkit.preprocess import transforms
import pickle as pkl
init_ = lambda m: init(m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
################################
# Forward Models
# Predict s_{t+1} | s_t, a_t
################################
class ForwardModel(nn.Module):
def __init__(self, state_shape, action_shape, hidden_size):
super().__init__()
self.fc1 = init_(nn.Linear(state_shape + action_shape[1], hidden_size))
self.fc2 = init_(nn.Linear(hidden_size, state_shape))
def forward(self, state, action):
x = torch.cat([state, action], 1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
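# Illustrative sketch (not part of the original module): the forward model maps a
# (state, action) pair to predicted next-state features. Dimensions are assumptions.
def _example_forward_model():
    model = ForwardModel(state_shape=128, action_shape=(1, 6), hidden_size=256)
    state = torch.zeros(4, 128)
    action = torch.zeros(4, 6)
    return model(state, action).shape  # torch.Size([4, 128])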
################################
# Inverse Models
# Predict a_t | s_t, s_{t+1}
################################
class InverseModel(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.fc1 = init_(nn.Linear(input_size * 2, hidden_size))
        # Note: stop gradient
self.fc2 = init_(nn.Linear(hidden_size, output_size))
def forward(self, phi_t, phi_t_plus_1):
x = torch.cat([phi_t, phi_t_plus_1], 1)
x = F.relu(self.fc1(x))
logits = self.fc2(x)
return logits
# ainvprobs = nn.softmax(logits, dim=-1) | [
"torch.nn.init.calculate_gain",
"torch.cat",
"torch.nn.Linear",
"torch.nn.init.constant_"
] | 1.0.1 | jozhang97/Side-tuning | dea345691fb7ee0230150fe56ddd644efdffa6ac |
1.0 | import torch
import torch.nn as nn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class _CustomDataParallel(nn.Module):
def __init__(self, model, device_ids):
super(_CustomDataParallel, self).__init__()
self.model = nn.DataParallel(model, device_ids=device_ids)
self.model.to(device)
num_devices = torch.cuda.device_count() if device_ids is None else len(device_ids)
print(f"{type(model)} using {num_devices} GPUs!")
def forward(self, *input, **kwargs):
return self.model(*input, **kwargs)
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.model.module, name) | [
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.cuda.device_count"
] | 1.0.1 | jozhang97/Side-tuning | dea345691fb7ee0230150fe56ddd644efdffa6ac |
1.0 | import torch.nn as nn
from torch.nn import Parameter, ModuleList
import torch.nn.functional as F
import torch
import multiprocessing
import numpy as np
import os
from gym import spaces
from torchvision.models import resnet18
from evkit.rl.utils import init, init_normc_
from evkit.preprocess import transforms
import torchvision as vision
from evkit.models.architectures import FrameStacked, Flatten, atari_conv
init_ = lambda m: init(m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
nn.init.calculate_gain('relu'))
N_CHANNELS = 3
def getNChannels():
return N_CHANNELS
########################
# SRL
########################
class BaseModelSRL(nn.Module):
"""
    Base class for an SRL network.
    It implements a getStates method to retrieve a state from observations.
"""
def __init__(self):
super(BaseModelSRL, self).__init__()
def getStates(self, observations):
"""
:param observations: (th.Tensor)
:return: (th.Tensor)
"""
return self.forward(observations)
def forward(self, x):
raise NotImplementedError
class BaseModelAutoEncoder(BaseModelSRL):
"""
    Base class for an SRL network (autoencoder family).
    It implements a getStates method to retrieve a state from observations.
"""
def __init__(self, n_frames, n_map_channels=0, use_target=True, output_size=512):
super(BaseModelAutoEncoder, self).__init__()
        self.output_size = output_size
        self.n_frames = n_frames
self.n_map_channels = n_map_channels
self.use_target = use_target
self.use_map = n_map_channels > 0
if self.use_map:
self.map_tower = nn.Sequential(
atari_conv(self.n_frames * self.n_map_channels),
nn.Conv2d(32, 64, kernel_size=4, stride=1), #, padding=3, bias=False),
nn.ReLU(inplace=True),
)
if self.use_target:
self.target_channels = 3
else:
self.target_channels = 0
# Inspired by ResNet:
# conv3x3 followed by BatchNorm2d
self.encoder_conv = nn.Sequential(
# 224x224xN_CHANNELS -> 112x112x64
nn.Conv2d(getNChannels(), 64, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1), # 56x56x64
conv3x3(in_planes=64, out_planes=64, stride=1), # 56x56x64
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2), # 27x27x64
conv3x3(in_planes=64, out_planes=64, stride=2), # 14x14x64
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2) # 6x6x64
)
self.decoder_conv = nn.Sequential(
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 13x13x64
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 27x27x64
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 55x55x64
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2), # 111x111x64
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, getNChannels(), kernel_size=4, stride=2), # 224x224xN_CHANNELS
)
self.encoder = FrameStacked(self.encoder_conv, self.n_frames)
self.conv1 = nn.Conv2d(self.n_frames * (64 + self.target_channels), 64, 3, stride=1) # c4 s 4
self.flatten = Flatten()
self.fc1 = init_(nn.Linear(64 * 4 * 4 * (self.use_map) + 64 * 4 * 4 * (1), 1024))
self.fc2 = init_(nn.Linear(1024, self.output_size))
def getStates(self, observations):
"""
:param observations: (th.Tensor)
:return: (th.Tensor)
"""
return self.encode(observations)
def encode(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
        return self.encoder_conv(x)
def decode(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
        return self.decoder_conv(x)
def forward(self, x):
"""
:param x: (th.Tensor)
:return: (th.Tensor)
"""
x_taskonomy = x['taskonomy']
if self.use_target:
x_taskonomy = torch.cat([x_taskonomy, x["target"]], dim=1)
x_taskonomy = F.relu(self.conv1(x_taskonomy))
if self.use_map:
x_map = x['map']
x_map = self.map_tower(x_map)
x_taskonomy = torch.cat([x_map, x_taskonomy], dim=1)
x = self.flatten(x_taskonomy)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return x
        # Unreachable legacy autoencoder path, kept for reference:
        # encoded = self.encode(x)
        # decoded = self.decode(encoded).view(input_shape)
        # return encoded  # , decoded
def conv3x3(in_planes, out_planes, stride=1):
""""
From PyTorch Resnet implementation
3x3 convolution with padding
:param in_planes: (int)
:param out_planes: (int)
:param stride: (int)
"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def srl_features_transform(task_path, dtype=np.float32):
''' rescale_centercrop_resize
Args:
output_size: A tuple CxWxH
dtype: of the output (must be np, not torch)
Returns:
a function which returns takes 'env' and returns transform, output_size, dtype
'''
_rescale_thunk = transforms.rescale_centercrop_resize((3, 224, 224))
if task_path != 'pixels_as_state':
# net = TaskonomyEncoder().cuda()
net = nn.Sequential(
# 224x224xN_CHANNELS -> 112x112x64
nn.Conv2d(getNChannels(), 64, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1), # 56x56x64
conv3x3(in_planes=64, out_planes=64, stride=1), # 56x56x64
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2), # 27x27x64
conv3x3(in_planes=64, out_planes=64, stride=2), # 14x14x64
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2) # 6x6x64
).cuda()
net.eval()
if task_path != 'None':
checkpoint = torch.load(task_path)
# checkpoint = {k.replace('model.encoder_conv.', ''): v for k, v in checkpoint.items() if 'encoder_conv' in k}
checkpoint = {k.replace('model.conv_layers.', '').replace('model.encoder_conv.', ''): v for k, v in checkpoint.items() if 'encoder_conv' in k or 'conv_layers' in k}
net.load_state_dict(checkpoint)
def encode(x):
if task_path == 'pixels_as_state':
return x
with torch.no_grad():
return net(x)
def _features_transform_thunk(obs_space):
rescale, _ = _rescale_thunk(obs_space)
def pipeline(x):
# x = rescale(x).view(1, 3, 224, 224)
x = torch.Tensor(x).cuda()
x = encode(x)
return x.cpu()
if task_path == 'pixels_as_state':
raise NotImplementedError
            # return pixels_as_state_pipeline, spaces.Box(-1, 1, (8, 16, 16), dtype)  # unreachable: pipeline not implemented
else:
return pipeline, spaces.Box(-1, 1, (64, 6, 6), dtype)
return _features_transform_thunk
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.no_grad",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.load",
"torch.nn.init.calculate_gain",
"torch.Tensor"
] | 1.0.1 | jozhang97/Side-tuning | dea345691fb7ee0230150fe56ddd644efdffa6ac |
1.0 | from collections import namedtuple, Counter, defaultdict
from tlkit.data.sequential_tasks_dataloaders import ConcatenatedDataLoader, CyclingDataLoader, ErrorPassingConcatenatedDataLoader, ErrorPassingCyclingDataLoader
from tlkit.utils import SINGLE_IMAGE_TASKS, TASKS_TO_CHANNELS
import torch
import torch.utils.data as utils
import torchvision.transforms as transforms
import torchvision.datasets as ds
import torch.utils.data as data
from tqdm import tqdm
from PIL import Image, ImageFile
import numpy as np
import os
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import warnings
from tlkit.data.img_transforms import default_loader, get_transform
from tlkit.data.splits import SPLIT_TO_NUM_IMAGES, taskonomy_no_midlevel as split_taskonomy_no_midlevel
TRAIN_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['train']
VAL_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['val']
TEST_BUILDINGS = split_taskonomy_no_midlevel['fullplus']['test']
ImageFile.LOAD_TRUNCATED_IMAGES = True # TODO Test this
class TaskonomyData(data.Dataset):
'''
Loads data for the Taskonomy dataset.
This expects that the data is structured
/path/to/data/
rgb/
modelk/
point_i_view_j.png
...
depth_euclidean/
... (other tasks)
If one would like to use pretrained representations, then they can be added into the directory as:
/path/to/data/
rgb_encoding/
modelk/
point_i_view_j.npy
...
Basically, any other folder name will work as long as it is named the same way.
'''
def __init__(self, data_path,
tasks,
buildings,
transform=None,
load_to_mem=False,
zip_file_name=False,
max_images=None):
'''
data_path: Path to data
tasks: Which tasks to load. Any subfolder will work as long as data is named accordingly
buildings: Which models to include. See `splits.taskonomy`
transform: one transform per task.
Note: This assumes that all images are present in all (used) subfolders
'''
self.return_tuple = True
if isinstance(tasks, str):
tasks = [tasks]
transform = [transform]
self.return_tuple = False
self.buildings = buildings
self.cached_data = {}
self.data_path = data_path
self.load_to_mem = load_to_mem
self.tasks = tasks
self.zip_file_name = zip_file_name
self.urls = {task: make_dataset(os.path.join(data_path, task), buildings, max_images)
for task in tasks}
# Validate number of images
n_images_task = [(len(obs), task) for task, obs in self.urls.items()]
print("\t" + " | ".join(["{}: {}".format(k, task) for task, k in n_images_task]))
if max(n_images_task)[0] != min(n_images_task)[0]:
print("Each task must have the same number of images. However, the max != min ({} != {}). Number of images per task is: \n\t{}".format(
max(n_images_task)[0], min(n_images_task)[0], "\n\t".join([str(t) for t in n_images_task])))
# count number of frames per building per task
all_buildings = defaultdict(dict)
for task, obs in self.urls.items():
c = Counter([url.split("/")[-2] for url in obs])
for building in c:
all_buildings[building][task] = c[building]
# find where the number of distinct counts is more than 1
print('Removing data from the following buildings')
buildings_to_remove = []
for b, count in all_buildings.items():
if len(set(list(count.values()))) > 1:
print(f"\t{b}:", count)
buildings_to_remove.append(b)
# [(len(obs), task) for task, obs in self.urls.items()]
# redo the loading with fewer buildings
buildings_redo = [b for b in buildings if b not in buildings_to_remove]
self.urls = {task: make_dataset(os.path.join(data_path, task), buildings_redo)
for task in tasks}
n_images_task = [(len(obs), task) for task, obs in self.urls.items()]
print("\t" + " | ".join(["{}: {}".format(k, task) for task, k in n_images_task]))
assert max(n_images_task)[0] == min(n_images_task)[0], \
"Each task must have the same number of images. However, the max != min ({} != {}). Number of images per task is: \n\t{}".format(
max(n_images_task)[0], min(n_images_task)[0], "\n\t".join([str(t) for t in n_images_task]))
self.size = max(n_images_task)[0]
# Perhaps load some things into main memory
if load_to_mem:
print('Writing activations to memory')
for t, task in zip(transform, tasks):
self.cached_data[task] = [None] * len(self)
for i, url in enumerate(self.urls[task]):
self.cached_data[task][i] = t(default_loader(url))
self.cached_data[task] = torch.stack(self.cached_data[task])
# self.cached_data = torch.stack(self.cached_data)
print('Finished writing some activations to memory')
self.transform = transform
def __len__(self):
return self.size
def __getitem__(self, index):
fpaths = [self.urls[task][index] for task in self.tasks]
if self.load_to_mem:
result = tuple([self.cached_data[task][index] for task in self.tasks])
else:
result = [default_loader(path) for path in fpaths]
if self.transform is not None:
# result = [transform(tensor) for transform, tensor in zip(self.transform, result)]
result_post = []
for i, (transform, tensor) in enumerate(zip(self.transform, result)):
try:
result_post.append(transform(tensor))
except Exception as e:
print(self.tasks[i], transform, tensor)
raise e
result = result_post
# handle 2 channel outputs
for i in range(len(self.tasks)):
task = self.tasks[i]
base_task = [t for t in SINGLE_IMAGE_TASKS if t in task]
if len(base_task) == 0:
continue
else:
base_task = base_task[0]
num_channels = TASKS_TO_CHANNELS[base_task]
if 'decoding' in task and result[i].shape[0] != num_channels:
assert torch.sum(result[i][num_channels:,:,:]) < 1e-5, 'unused channels should be 0.'
result[i] = result[i][:num_channels,:,:]
if self.zip_file_name:
result = tuple(zip(fpaths, result))
if self.return_tuple:
return result
else:
return result[0]
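# Illustrative sketch (not part of the original module): instantiating the dataset
# directly. The data path, building name and task are assumptions and must match the
# on-disk layout described in the class docstring.
def _example_taskonomy_dataset():
    dataset = TaskonomyData(
        data_path='/path/to/data',
        tasks='rgb',
        buildings=['modelk'],
        transform=get_transform('rgb'),
    )
    return dataset[0] if len(dataset) > 0 else None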
def make_dataset(dir, folders=None, max_images=None):
# folders are building names. If None, get all the images (from both building folders and dir)
has_reached_capacity = lambda images, max_images: not max_images is None and len(images) >= max_images
images = []
dir = os.path.expanduser(dir)
if not os.path.isdir(dir):
assert "bad directory"
for subfolder in sorted(os.listdir(dir)):
subfolder_path = os.path.join(dir, subfolder)
if os.path.isdir(subfolder_path) and (folders is None or subfolder in folders):
for fname in sorted(os.listdir(subfolder_path)):
path = os.path.join(subfolder_path, fname)
if not has_reached_capacity(images, max_images):
images.append(path)
# If folders/buildings are not specified, use images in dir
if folders is None and os.path.isfile(subfolder_path) and not has_reached_capacity(images, max_images):
images.append(subfolder_path)
return images
def get_dataloaders(data_path,
tasks,
batch_size=64,
batch_size_val=4,
zip_file_name=False,
train_folders=TRAIN_BUILDINGS,
val_folders=VAL_BUILDINGS,
test_folders=TEST_BUILDINGS,
transform=None,
num_workers=0,
load_to_mem=False,
pin_memory=False,
max_images=None):
"""
:param data_path: directory that data is stored at
:param tasks: names of subdirectories to return observations from
:param batch_size:
:param zip_file_name: when returning an observation, this will zip the fpath to it. E.g. (/path/to/img.png, OBS)
:param train_folders: in a big data dir, which subfolders contain our training data
:param val_folders: in a big data dir, which subfolders contain our val data
:param max_images: maximum number of images in any dataset
:return: dictionary of dataloaders
"""
if transform is None:
if isinstance(tasks, str):
transform = get_transform(tasks)
else:
transform = [get_transform(task) if len(task.split(' ')) == 1 else get_transform(*task.split(' ')) for task in tasks]
tasks = [t.split(' ')[0] for t in tasks] # handle special data operations
if isinstance(train_folders, str):
train_folders = split_taskonomy_no_midlevel[train_folders]['train']
if isinstance(val_folders, str):
val_folders = split_taskonomy_no_midlevel[val_folders]['val']
if isinstance(test_folders, str):
test_folders = split_taskonomy_no_midlevel[test_folders]['test']
dataloaders = {}
print(f"Taskonomy dataset TRAIN folders: {train_folders}")
dataset = TaskonomyData(data_path, tasks, buildings=train_folders,
transform=transform, zip_file_name=zip_file_name,
load_to_mem=load_to_mem, max_images=max_images)
if len(dataset) == 0:
print(f'\tNO IMAGES FOUND for tasks {tasks} at path {data_path}')
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['train'] = dataloader
print(f"Taskonomy dataset VAL folders: {val_folders}")
dataset = TaskonomyData(data_path, tasks, buildings=val_folders,
transform=transform, zip_file_name=zip_file_name, load_to_mem=load_to_mem, max_images=max_images)
if len(dataset) == 0:
print(f'\tNO IMAGES FOUND for tasks {tasks} at path {data_path}')
dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['val'] = dataloader
print(f"Taskonomy dataset TEST folders: {test_folders}")
dataset = TaskonomyData(data_path, tasks, buildings=test_folders,
transform=transform, zip_file_name=zip_file_name, load_to_mem=load_to_mem, max_images=max_images)
if len(dataset) == 0:
print(f'\tNO IMAGES FOUND for tasks {tasks} at path {data_path}')
dataloader = DataLoader(dataset, batch_size=batch_size_val, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
dataloaders['test'] = dataloader
return dataloaders
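# Illustrative sketch (not part of the original module): requesting train/val/test
# loaders for a single task. The data path and batch sizes are assumptions.
def _example_get_dataloaders():
    loaders = get_dataloaders(
        data_path='/path/to/data',
        tasks='rgb',
        batch_size=8,
        batch_size_val=4,
        num_workers=0,
    )
    return {phase: len(dl.dataset) for phase, dl in loaders.items()}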
def get_lifelong_dataloaders(data_path,
sources,
targets,
masks,
epochs_per_task=5,
epochs_until_cycle=0,
split='fullplus',
batch_size=64,
batch_size_val=4,
transform=None,
num_workers=0,
load_to_mem=False,
pin_memory=False,
speedup_no_rigidity=False,
max_images_per_task=None):
phases = ['train', 'val', 'test']
dataloaders = {phase: [] for phase in phases}
if isinstance(masks, bool):
masks = [masks] * len(sources)
masks = [['mask_valid'] if mask else [] for mask in masks]
for i, (source, target, mask) in enumerate(zip(sources, targets, masks)):
print(f'# Task {i} dataloader: {source} -> {target}')
tasks = source + target + mask
dl = get_dataloaders(
data_path,
tasks,
batch_size=batch_size,
batch_size_val=batch_size_val,
train_folders=split,
val_folders=split,
test_folders=split,
transform=transform,
num_workers=num_workers,
load_to_mem=load_to_mem,
pin_memory=pin_memory,
max_images=max_images_per_task,
)
for phase in phases:
dataloaders[phase].append(dl[phase])
if speedup_no_rigidity:
# For methods that do not forget (no intransigence) by construction.
# In validation, we only compute task performance for just-trained task and next-to-be-trained task
epoch_lengths = [len(dl.dataset) for dl in dataloaders['val']]
epoch_length = min(epoch_lengths) if min(epoch_lengths) == max(epoch_lengths) else None
dl_just_trained = CyclingDataLoader(dataloaders['val'], epochs_until_cycle=1, start_dl=0,
epoch_length_per_dl=epoch_length)
dl_next_to_be_trained = CyclingDataLoader(dataloaders['val'], epochs_until_cycle=0, start_dl=0,
epoch_length_per_dl=epoch_length)
dataloaders['val'] = ErrorPassingConcatenatedDataLoader([dl_just_trained, dl_next_to_be_trained], zip_idx=False)
else:
dataloaders['val'] = ErrorPassingConcatenatedDataLoader(dataloaders['val'])
train_epoch_length = SPLIT_TO_NUM_IMAGES[split] if split is not None else min([len(dl.dataset) for dl in dataloaders['train']])
dataloaders['train'] = ErrorPassingCyclingDataLoader(dataloaders['train'], epoch_length_per_dl=epochs_per_task * train_epoch_length, epochs_until_cycle=epochs_until_cycle)
dataloaders['test'] = ErrorPassingConcatenatedDataLoader(dataloaders['test'])
return dataloaders
| [
"torch.stack",
"torch.utils.data.DataLoader",
"torch.sum"
] | 1.0.1 | jozhang97/Side-tuning | dea345691fb7ee0230150fe56ddd644efdffa6ac |
1.0 | from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='retinanet',
version='0.1',
description='Fast and accurate single shot object detector',
author = 'NVIDIA Corporation',
author_email='[email protected]',
packages=['retinanet', 'retinanet.backbones'],
ext_modules=[CUDAExtension('retinanet._C',
['csrc/extensions.cpp', 'csrc/engine.cpp', 'csrc/cuda/decode.cu', 'csrc/cuda/nms.cu'],
extra_compile_args={
'cxx': ['-std=c++14', '-O2', '-Wall'],
'nvcc': [
'-std=c++14', '--expt-extended-lambda', '--use_fast_math', '-Xcompiler', '-Wall',
'-gencode=arch=compute_50,code=sm_50', '-gencode=arch=compute_52,code=sm_52',
'-gencode=arch=compute_60,code=sm_60', '-gencode=arch=compute_61,code=sm_61',
'-gencode=arch=compute_70,code=sm_70', '-gencode=arch=compute_72,code=sm_72',
'-gencode=arch=compute_75,code=sm_75', '-gencode=arch=compute_75,code=compute_75'
],
},
library_dirs= ['/usr/local/lib/'],
libraries=['nvinfer', 'nvinfer_plugin', 'nvonnxparser', 'opencv_core', 'opencv_highgui', 'opencv_imgproc', 'opencv_imgcodecs'])
],
cmdclass={'build_ext': BuildExtension.with_options(no_python_abi_suffix=True)},
install_requires=[
'torch>=1.0.0a0',
#'torchvision',
'apex @ git+https://github.com/NVIDIA/apex',
'pycocotools @ git+https://github.com/nvidia/cocoapi.git#subdirectory=PythonAPI',
'pillow>=6.2.2',
'requests',
],
entry_points = {'console_scripts': ['retinanet=retinanet.main:main']}
)
| [
"torch.utils.cpp_extension.CUDAExtension",
"torch.utils.cpp_extension.BuildExtension.with_options"
] | 1.0.0 | abhinavagarwalla/MAL-inference-deepsort | 3dc2010f76dc249e60d3e970247faa7e7c5ffca6 |
1.0 | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
import os
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
SequenceClassifierOutputWithPast,
)
from ...modeling_utils import (
Conv1D,
PreTrainedModel,
SequenceSummary,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
from ...utils import logging
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from .configuration_gpt2 import GPT2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "GPT2Config"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"gpt2",
"gpt2-medium",
"gpt2-large",
"gpt2-xl",
"distilgpt2",
# See all GPT-2 models at https://huggingface.co/models?filter=gpt2
]
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
"""Load tf checkpoints in a pytorch model"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt2_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array.squeeze())
for name, array in zip(names, arrays):
name = name[6:] # skip "model/"
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):
super().__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer(
"bias", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.is_cross_attention = is_cross_attention
if self.is_cross_attention:
self.c_attn = Conv1D(2 * n_state, nx)
self.q_attn = Conv1D(n_state, nx)
else:
self.c_attn = Conv1D(3 * n_state, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / (float(v.size(-1)) ** 0.5)
nd, ns = w.size(-2), w.size(-1)
if not self.is_cross_attention:
# if only "normal" attention layer implements causal mask
mask = self.bias[:, :, ns - nd : ns, :ns]
w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = (torch.matmul(w, v),)
if output_attentions:
outputs += (w,)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
if encoder_hidden_states is not None:
assert hasattr(
self, "q_attn"
), "If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`."
query = self.q_attn(hidden_states)
key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
attention_mask = encoder_attention_mask
else:
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = (key.transpose(-2, -1), value) # transpose to have same shapes
else:
present = None
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return (a, present) + attn_outputs[1:] # a, present, (attentions)
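# Illustrative sketch (not part of the original module): the causal mask stored in
# Attention's "bias" buffer. Future positions (the upper triangle) are replaced by a
# large negative value before the softmax, mirroring what _attn does above. The
# context length below is an assumption for demonstration only.
def _example_causal_mask(n_ctx=4):
    bias = torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)
    scores = torch.zeros(1, 1, n_ctx, n_ctx)
    masked = torch.where(bias.bool(), scores, torch.tensor(-1e4))
    return nn.Softmax(dim=-1)(masked)  # row i attends only to positions <= i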
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
hidden_size = config.n_embd
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = Attention(hidden_size, n_ctx, config, scale)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = MLP(inner_dim, config)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
attn_outputs = self.attn(
self.ln_1(hidden_states),
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + hidden_states
if encoder_hidden_states is not None:
# add one self-attention block for cross-attention
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
cross_attn_outputs = self.crossattention(
self.ln_cross_attn(hidden_states),
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attn_output = cross_attn_outputs[0]
# residual connection
hidden_states = hidden_states + attn_output
outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))
# residual connection
hidden_states = hidden_states + feed_forward_hidden_states
if use_cache:
outputs = (hidden_states,) + outputs
else:
outputs = (hidden_states,) + outputs[1:]
return outputs # hidden_states, present, (attentions, cross_attentions)
class GPT2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPT2Config
load_tf_weights = load_tf_weights_in_gpt2
base_model_prefix = "transformer"
is_parallelizable = True
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class GPT2DoubleHeadsModelOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Language modeling loss.
mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
Multiple choice classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of length :obj:`config.n_layers`, containing tuples of tensors of shape :obj:`(batch_size, num_heads,
sequence_length, embed_size_per_head)`).
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
mc_loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
mc_logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
GPT2_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
GPT2_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):
:obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else
``past_key_values[0][0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be
passed as ``input_ids``.
Indices can be obtained using :class:`~transformers.GPT2Tokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers`):
Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
:obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which
have their past given to this model should not be passed as ``input_ids`` as they have already been
computed.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see
:obj:`past_key_values`).
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice.
Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
it will evenly distribute blocks across all devices.
Args:
device_map (:obj:`Dict[int, list]`, optional, defaults to None):
A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
automatically mapped to the first device (for esoteric reasons). That means that the first device should
have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the
following number of attention modules:
- gpt2: 12
- gpt2-medium: 24
- gpt2-large: 36
- gpt2-xl: 48
Example::
# Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:
model = GPT2LMHeadModel.from_pretrained('gpt2-xl')
device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],
3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]}
model.parallelize(device_map)
"""
DEPARALLELIZE_DOCSTRING = r"""
Moves the model to cpu from a model parallel state.
Example::
# On a 4 GPU machine with gpt2-large:
model = GPT2LMHeadModel.from_pretrained('gpt2-large')
device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7],
1: [8, 9, 10, 11, 12, 13, 14, 15],
2: [16, 17, 18, 19, 20, 21, 22, 23],
3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]}
model.parallelize(device_map) # Splits the model across several devices
model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache()
"""
class GPT2Embeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.emb3 = nn.Embedding(config.emb3_size, config.n_embd)
self.emb4 = nn.Embedding(config.emb4_size, config.n_embd)
# token type embedding also -> wte
self.drop = nn.Dropout(config.embd_pdrop)
        # layer norm is in the RobertaModel
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, emb3_ids=None, emb4_ids=None, inputs_embeds=None
):
        # Some preprocessing of the forward inputs is done in the Model class (not necessary to move it here)
# tok emb + pos emb
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
# tok type emb
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
# third emb
if emb3_ids is not None:
emb3_embeds = self.emb3(emb3_ids)
hidden_states = hidden_states + emb3_embeds
# fourth emb
if emb4_ids is not None:
emb4_embeds = self.emb4(emb4_ids)
hidden_states = hidden_states + emb4_embeds
# fifth emb (not used)
# dropout
hidden_states = self.drop(hidden_states)
return hidden_states
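# Minimal sketch of how the sums above combine (the config values and id tensors are assumptions);
# each optional id tensor contributes one additive [batch, seq, n_embd] term before dropout:
# >>> # config.vocab_size=50257, config.n_positions=1024, config.emb3_size=10, config.emb4_size=10, config.n_embd=768
# >>> emb = GPT2Embeddings(config)
# >>> h = emb(input_ids=ids, position_ids=pos, emb3_ids=e3, emb4_ids=e4)   # [batch, seq, 768]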
@add_start_docstrings(
"The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
GPT2_START_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
# NEW
self.embeddings = GPT2Embeddings(config)
# NEW
# self.wte = nn.Embedding(config.vocab_size, config.n_embd)
# self.wpe = nn.Embedding(config.n_positions, config.n_embd)
# self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
# Check validity of device_map
self.device_map = (
get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
)
assert_device_map(self.device_map, len(self.h))
self.model_parallel = True
self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
self.last_device = "cuda:" + str(max(self.device_map.keys()))
# self.wte = self.wte.to(self.first_device)
# self.wpe = self.wpe.to(self.first_device)
self.embeddings = self.embeddings.to(self.first_device)
# Load onto devices
for k, v in self.device_map.items():
for block in v:
cuda_device = "cuda:" + str(k)
self.h[block] = self.h[block].to(cuda_device)
# ln_f to last
self.ln_f = self.ln_f.to(self.last_device)
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.model_parallel = False
self.device_map = None
self.first_device = "cpu"
self.last_device = "cpu"
# self.wte = self.wte.to("cpu")
# self.wpe = self.wpe.to("cpu")
for index in range(len(self.h)):
self.h[index] = self.h[index].to("cpu")
self.ln_f = self.ln_f.to("cpu")
torch.cuda.empty_cache()
def get_input_embeddings(self):
return self.embeddings.wte
def set_input_embeddings(self, new_embeddings):
self.embeddings.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
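# Illustrative call (assumed values): model._prune_heads({0: [0, 2], 5: [1]}) prunes heads 0 and 2
# of layer 0 and head 1 of layer 5.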
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="gpt2",
output_type=BaseModelOutputWithPastAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
emb3_ids=None,
emb4_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
assert batch_size > 0, "batch_size has to be defined and > 0"
attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
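# Illustrative (assumed) example: a 2D mask [[1, 1, 0]] becomes the additive bias
# [[[[0.0, 0.0, -10000.0]]]] after the two steps above, broadcastable over heads and query positions.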
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# head_mask has shape n_layer x batch x n_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
# if inputs_embeds is None:
# inputs_embeds = self.wte(input_ids)
# position_embeds = self.wpe(position_ids)
# hidden_states = inputs_embeds + position_embeds
# if token_type_ids is not None:
# token_type_embeds = self.wte(token_type_ids)
# hidden_states = hidden_states + token_type_embeds
# hidden_states = self.drop(hidden_states)
# NEW
hidden_states = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
emb3_ids=emb3_ids,
emb4_ids=emb4_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
# NEW
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure layer_past is on same device as hidden_states (might not be correct)
if layer_past is not None:
layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache, output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
None,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask[i],
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
@add_start_docstrings(
"""
The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
""",
GPT2_START_DOCSTRING,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings(PARALLELIZE_DOCSTRING)
def parallelize(self, device_map=None):
self.device_map = (
get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
if device_map is None
else device_map
)
assert_device_map(self.device_map, len(self.transformer.h))
self.transformer.parallelize(self.device_map)
self.lm_head = self.lm_head.to(self.transformer.first_device)
self.model_parallel = True
@add_start_docstrings(DEPARALLELIZE_DOCSTRING)
def deparallelize(self):
self.transformer.deparallelize()
self.transformer = self.transformer.to("cpu")
self.lm_head = self.lm_head.to("cpu")
self.model_parallel = False
torch.cuda.empty_cache()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only keep the last token of input_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
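# Illustrative (assumed) example: attention_mask [[1, 1, 1, 0]] -> cumsum - 1 = [[0, 1, 2, 2]]
# -> masked_fill on the pad position = [[0, 1, 2, 1]].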
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="gpt2",
output_type=CausalLMOutputWithCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
emb3_ids=None,
emb4_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size]``. All labels set to
``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
emb3_ids=emb3_ids,
emb4_ids=emb4_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.transformer.first_device)
hidden_states = hidden_states.to(self.lm_head.weight.device)
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
cross_attentions=transformer_outputs.cross_attentions,
)
@staticmethod
def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the :obj:`past_key_values` cache if
:meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is
called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past
)
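# Illustrative (assumed) example: beam_idx = torch.tensor([1, 0]) swaps the cached key/value states
# of beam 0 and beam 1 in every layer of `past`.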
@add_start_docstrings(
"""
The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
input embeddings; the classification head takes as input the hidden state at a specified classification token index in the
input sequence.
""",
GPT2_START_DOCSTRING,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only keep the last token of input_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
emb3_ids=None,
emb4_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
labels=None,
mc_labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, defaults to the index of the last token of the input):
Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) - 1]``.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size]``. All labels set to
``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size]``.
mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see
`input_ids` above)
Return:
Example::
>>> import torch
>>> from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
>>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
>>> model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
>>> # Add a [CLS] to the vocabulary (we should train it also!)
>>> num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'})
>>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> encoded_choices = [tokenizer.encode(s) for s in choices]
>>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
>>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2
>>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1
>>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
>>> lm_logits = outputs.logits
>>> mc_logits = outputs.mc_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
emb3_ids=emb3_ids,
emb4_ids=emb4_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
mc_loss = None
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
lm_loss = None
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_loss is not None:
output = (mc_loss,) + output
return ((lm_loss,) + output) if lm_loss is not None else output
return GPT2DoubleHeadsModelOutput(
loss=lm_loss,
mc_loss=mc_loss,
logits=lm_logits,
mc_logits=mc_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@staticmethod
def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the :obj:`past_key_values` cache if
:meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is
called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past
)
@add_start_docstrings(
"""
The GPT2 Model transformer with a sequence classification head on top (linear layer).
:class:`~transformers.GPT2ForSequenceClassification` uses the last token in order to do the classification, as
other causal models (e.g. GPT-1) do.
Since it does classification on the last token, it requires to know the position of the last token. If a
:obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take
the last value in each row of the batch).
""",
GPT2_START_DOCSTRING,
)
class GPT2ForSequenceClassification(GPT2PreTrainedModel):
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = GPT2Model(config)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
self.init_weights()
# Model parallel
self.model_parallel = False
self.device_map = None
@add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="microsoft/dialogrpt",
output_type=SequenceClassifierOutputWithPast,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
emb3_ids=None,
emb4_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss);
if :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
emb3_ids=emb3_ids,
emb4_ids=emb4_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
assert (
self.config.pad_token_id is not None or batch_size == 1
), "Cannot handle batch sizes > 1 if no padding token is defined."
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if input_ids is not None:
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
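# Illustrative (assumed) example: with pad_token_id=0, input_ids [[5, 6, 0, 0]] gives
# sequence_lengths = tensor([1]), i.e. the index of the last non-padding token in each row.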
else:
sequence_lengths = -1
logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
f"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[range(batch_size), sequence_lengths]
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
| [
"torch.nn.Linear",
"torch.cat",
"torch.ne",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.nn.Softmax",
"torch.tensor",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.cuda.empty_cache",
"torch.matmul",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.from_numpy",
"torch.nn.Embedding"
] | 1.0 | KanaCS/transformers | d4ba8ec0d56a332fdc66d0339db4dfe1a9af7af0 |
1.7 | import torch
import torch.nn as nn
from pytorch_models.MF import BaseMF
class GMF(BaseMF):
def __init__(self, hyper_params):
super(GMF, self).__init__(hyper_params)
self.final = nn.Linear(hyper_params['latent_size'], 1)
self.dropout = nn.Dropout(hyper_params['dropout'])
def get_score(self, user_id, item_id):
# For the FM
user_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)
item_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)
# Embed Latent space
user = self.dropout(self.user_embedding(user_id.view(-1)))
item = self.dropout(self.item_embedding(item_id.view(-1)))
joint = user * item
rating = self.final(joint)[:, 0].view(user_id.shape) # [bsz]
return user_bias + item_bias + self.global_bias + rating
class MLP(BaseMF):
def __init__(self, hyper_params):
super(MLP, self).__init__(hyper_params)
self.project = nn.Sequential(
nn.Dropout(hyper_params['dropout']),
nn.Linear(2 * hyper_params['latent_size'], hyper_params['latent_size']),
nn.ReLU(),
nn.Linear(hyper_params['latent_size'], hyper_params['latent_size'])
)
self.final = nn.Linear(hyper_params['latent_size'], 1)
self.dropout = nn.Dropout(hyper_params['dropout'])
def get_score(self, user_id, item_id):
# For the FM
user_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)
item_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)
# Embed Latent space
user = self.dropout(self.user_embedding(user_id.view(-1)))
item = self.dropout(self.item_embedding(item_id.view(-1)))
joint = torch.cat([ user, item ], dim = -1)
joint = self.project(joint)
rating = self.final(joint)[:, 0].view(user_id.shape)
return user_bias + item_bias + self.global_bias + rating
class NeuMF(BaseMF):
def __init__(self, hyper_params):
super(NeuMF, self).__init__(hyper_params, keep_gamma = False)
self.gmf_user_embedding = nn.Embedding(hyper_params['total_users'], hyper_params['latent_size'])
self.gmf_item_embedding = nn.Embedding(hyper_params['total_items'], hyper_params['latent_size'])
self.mlp_user_embedding = nn.Embedding(hyper_params['total_users'], hyper_params['latent_size'])
self.mlp_item_embedding = nn.Embedding(hyper_params['total_items'], hyper_params['latent_size'])
self.project = nn.Sequential(
nn.Dropout(hyper_params['dropout']),
nn.Linear(2 * hyper_params['latent_size'], hyper_params['latent_size']),
nn.ReLU(),
nn.Linear(hyper_params['latent_size'], hyper_params['latent_size'])
)
self.final = nn.Linear(2 * hyper_params['latent_size'], 1)
self.dropout = nn.Dropout(hyper_params['dropout'])
def init(self, gmf_model, mlp_model):
with torch.no_grad():
self.gmf_user_embedding.weight.data = gmf_model.user_embedding.weight.data
self.gmf_item_embedding.weight.data = gmf_model.item_embedding.weight.data
self.mlp_user_embedding.weight.data = mlp_model.user_embedding.weight.data
self.mlp_item_embedding.weight.data = mlp_model.item_embedding.weight.data
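# Copy the MLP tower weights layer by layer; Dropout/ReLU layers have no weight/bias, so the bare
# except below simply skips them.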
for i in range(len(self.project)):
try:
self.project[i].weight.data = mlp_model.project[i].weight.data
self.project[i].bias.data = mlp_model.project[i].bias.data
except: pass
self.final.weight.data = torch.cat([ gmf_model.final.weight.data, mlp_model.final.weight.data ], dim = -1)
self.final.bias.data = 0.5 * (gmf_model.final.bias.data + mlp_model.final.bias.data)
self.user_bias.data = 0.5 * (gmf_model.user_bias.data + mlp_model.user_bias.data)
self.item_bias.data = 0.5 * (gmf_model.item_bias.data + mlp_model.item_bias.data)
def get_score(self, user_id, item_id):
# For the FM
user_bias = self.user_bias.gather(0, user_id.view(-1)).view(user_id.shape)
item_bias = self.item_bias.gather(0, item_id.view(-1)).view(item_id.shape)
# GMF Part
user = self.dropout(self.gmf_user_embedding(user_id.view(-1))) # [bsz x 32]
item = self.dropout(self.gmf_item_embedding(item_id.view(-1))) # [bsz x 32]
gmf_joint = user * item
# MLP Part
user = self.dropout(self.mlp_user_embedding(user_id.view(-1))) # [bsz x 32]
item = self.dropout(self.mlp_item_embedding(item_id.view(-1))) # [bsz x 32]
mlp_joint = torch.cat([ user, item ], dim = -1)
mlp_joint = self.project(mlp_joint)
# NeuMF
final = torch.cat([ gmf_joint, mlp_joint ], dim = -1)
rating = self.final(final)[:, 0].view(user_id.shape) # [bsz]
return user_bias + item_bias + self.global_bias + rating
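# Hedged usage sketch (the hyper_params values are assumptions; BaseMF may require further keys):
# >>> hp = {'total_users': 1000, 'total_items': 2000, 'latent_size': 32, 'dropout': 0.2}
# >>> gmf, mlp = GMF(hp), MLP(hp)                  # pretrain these two first
# >>> neumf = NeuMF(hp); neumf.init(gmf, mlp)      # warm-start the fused model
# >>> scores = neumf.get_score(user_id, item_id)   # user_id/item_id: LongTensors of equal shape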
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.no_grad",
"torch.nn.ReLU",
"torch.nn.Embedding"
] | 1.7.0 | noveens/sampling_cf | e135819b1e7310ee58edbbd138f303e5240a2619 |
1.0 | """
This is an implementation of R2D2 (Soft Actor-Critic version).
See https://openreview.net/pdf?id=r1lyTjAqYX and https://arxiv.org/abs/1801.01290
"""
import torch
import torch.nn as nn
from machina import loss_functional as lf
from machina import logger
from machina.traj import traj_functional as tf
def train(traj,
pol, qfs, targ_qfs, log_alpha,
optim_pol, optim_qfs, optim_alpha,
epoch, batch_size, seq_length, burn_in_length, # optimization hypers
tau, gamma, sampling, reparam=True,
log_enable=True,
):
"""
Train function for soft actor critic.
Parameters
----------
traj : Traj
Off policy trajectory.
pol : Pol
Policy.
qfs : list of SAVfunction
Q function.
targ_qfs : list of SAVfunction
Target Q function.
log_alpha : torch.Tensor
Temperature parameter of entropy.
optim_pol : torch.optim.Optimizer
Optimizer for Policy.
optim_qfs : list of torch.optim.Optimizer
Optimizer for Q function.
optim_alpha : torch.optim.Optimizer
Optimizer for alpha.
epoch : int
Number of optimization iterations.
batch_size : int
Number of sequences in each batch.
seq_length : int
Length of each sampled sequence.
burn_in_length : int
Number of leading steps in each sequence used only for RNN burn-in.
tau : float
Target updating rate.
gamma : float
Discounting rate.
sampling : int
Number of samples used when approximating the expectation.
reparam : bool
If True, use the reparameterization trick when sampling actions.
log_enable: bool
If True, enable logging
Returns
-------
result_dict : dict
Dictionary which contains losses information.
"""
pol_losses = []
_qf_losses = []
alpha_losses = []
if log_enable:
logger.log("Optimizing...")
for batch, start_indices in traj.prioritized_random_batch_rnn(batch_size, seq_length, epoch, return_indices=True):
batch, pol_loss, qf_losses, alpha_loss, td_losses = lf.r2d2_sac(
pol, qfs, targ_qfs, log_alpha, batch, gamma, sampling, burn_in_length, reparam)
optim_pol.zero_grad()
pol_loss.backward()
optim_pol.step()
for optim_qf, qf_loss in zip(optim_qfs, qf_losses):
optim_qf.zero_grad()
qf_loss.backward()
optim_qf.step()
optim_alpha.zero_grad()
alpha_loss.backward()
optim_alpha.step()
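# Polyak-average the target networks: targ_q <- (1 - tau) * targ_q + tau * q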
for qf, targ_qf in zip(qfs, targ_qfs):
for q, targ_q in zip(qf.parameters(), targ_qf.parameters()):
targ_q.detach().copy_((1 - tau) * targ_q.detach() + tau * q.detach())
pol_losses.append(pol_loss.detach().cpu().numpy())
_qf_losses.append(
(sum(qf_losses) / len(qf_losses)).detach().cpu().numpy())
alpha_losses.append(alpha_loss.detach().cpu().numpy())
# update seq_pris
train_length = seq_length - burn_in_length
for i in range(batch_size):
start = start_indices[i] + burn_in_length
seq_indices = torch.arange(start, start+train_length-1)
traj = tf.update_pris(
traj, td_losses[:, i], seq_indices, update_epi_pris=True, seq_length=seq_length)
if log_enable:
logger.log("Optimization finished!")
return dict(
PolLoss=pol_losses,
QfLoss=_qf_losses,
AlphaLoss=alpha_losses
)
| [
"torch.arange"
] | 1.0.1 | krish-dx/machina | f93bb6f5aca1feccd71fc509bd6370d2015e2d85 |
1.4 | from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDA_HOME
from torch.utils.cpp_extension import CppExtension, CUDAExtension
# In any case, include the CPU version
modules = [
CppExtension('torchsearchsorted.cpu',
['src/cpu/searchsorted_cpu_wrapper.cpp']),
]
# If nvcc is available, add the CUDA extension
if CUDA_HOME:
modules.append(
CUDAExtension('torchsearchsorted.cuda',
['src/cuda/searchsorted_cuda_wrapper.cpp',
'src/cuda/searchsorted_cuda_kernel.cu'])
)
tests_require = [
'pytest',
]
# Now proceed to setup
setup(
name='torchsearchsorted',
version='1.1',
description='A searchsorted implementation for pytorch',
keywords='searchsorted',
author='Antoine Liutkus',
author_email='[email protected]',
packages=find_packages(where='src'),
package_dir={"": "src"},
ext_modules=modules,
tests_require=tests_require,
extras_require={
'test': tests_require,
},
cmdclass={
'build_ext': BuildExtension
}
)
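# Typical (assumed) workflow: `pip install .` from the package root builds the CPU extension and,
# when CUDA_HOME is set, the CUDA extension as well; tests can then be run with `pytest`.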
| [
"torch.utils.cpp_extension.CUDAExtension",
"torch.utils.cpp_extension.CppExtension"
] | 1.4.0 | small-zeng/nerf-pytorch | 59b5fc655e1c22cd42dfa4a1617ba6feb8ce3464 |
1.5 | """ Each encoder should have following attributes and methods and be inherited from `_base.EncoderMixin`
Attributes:
_out_channels (list of int): specify number of channels for each encoder feature tensor
_depth (int): specify number of stages in encoder (in other words number of downsampling operations)
_in_channels (int): default number of input channels in first Conv2d layer for encoder (usually 3)
Methods:
forward(self, x: torch.Tensor)
produce list of features of different spatial resolutions, each feature is a 4D torch.tensor of
shape NCHW (features should be sorted in descending order according to spatial resolution, starting
with resolution same as input `x` tensor).
Input: `x` with shape (1, 3, 64, 64)
Output: [f0, f1, f2, f3, f4, f5] - features with corresponding shapes
[(1, 3, 64, 64), (1, 64, 32, 32), (1, 128, 16, 16), (1, 256, 8, 8),
(1, 512, 4, 4), (1, 1024, 2, 2)] (C - dim may differ)
also should support number of features according to specified depth, e.g. if depth = 5,
number of feature tensors = 6 (one with same resolution as input and 5 downsampled),
depth = 3 -> number of feature tensors = 4 (one with same resolution as input and 3 downsampled).
"""
import torch.nn as nn
from pretrainedmodels.models.inceptionv4 import BasicConv2d, InceptionV4, pretrained_settings
from ._base import EncoderMixin
class InceptionV4Encoder(InceptionV4, EncoderMixin):
def __init__(self, stage_idxs, out_channels, depth=5, **kwargs):
super().__init__(**kwargs)
self._stage_idxs = stage_idxs
self._out_channels = out_channels
self._depth = depth
self._in_channels = 3
# correct paddings
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.kernel_size == (3, 3):
m.padding = (1, 1)
if isinstance(m, nn.MaxPool2d):
m.padding = (1, 1)
# remove linear layers
del self.last_linear
def make_dilated(self, stage_list, dilation_list):
raise ValueError(
"InceptionV4 encoder does not support dilated mode "
"due to pooling operation for downsampling!"
)
def get_stages(self):
return [
nn.Identity(),
self.features[:self._stage_idxs[0]],
self.features[self._stage_idxs[0]:self._stage_idxs[1]],
self.features[self._stage_idxs[1]:self._stage_idxs[2]],
self.features[self._stage_idxs[2]:self._stage_idxs[3]],
self.features[self._stage_idxs[3]:],
]
def forward(self, x):
stages = self.get_stages()
features = []
for i in range(self._depth + 1):
x = stages[i](x)
features.append(x)
return features
def load_state_dict(self, state_dict, **kwargs):
state_dict.pop("last_linear.bias")
state_dict.pop("last_linear.weight")
super().load_state_dict(state_dict, **kwargs)
inceptionv4_encoders = {
"inceptionv4": {
"encoder": InceptionV4Encoder,
"pretrained_settings": pretrained_settings["inceptionv4"],
"params": {
"stage_idxs": (3, 5, 9, 15),
"out_channels": (3, 64, 192, 384, 1024, 1536),
"num_classes": 1001,
},
}
}
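# Hedged usage sketch (input shape taken from the module docstring; everything else, including
# `import torch`, is assumed):
# >>> cfg = inceptionv4_encoders["inceptionv4"]
# >>> encoder = cfg["encoder"](**cfg["params"])
# >>> feats = encoder(torch.rand(1, 3, 64, 64))   # list of depth + 1 = 6 feature maps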
| [
"torch.nn.Identity"
] | 1.5.0 | PVSemk/segmentation_models.pytorch | 8d9b033be918dfc1e6186d9ef404cc7d2c171e8d |
1.3 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# NOTE: To be removed once (if) https://github.com/pytorch/pytorch/pull/37385 lands
from __future__ import annotations
from collections import OrderedDict
import torch
from torch._six import container_abcs
from torch.nn import Module
class BufferDict(Module):
r"""Holds buffers in a dictionary.
BufferDict can be indexed like a regular Python dictionary, but buffers it
contains are properly registered, and will be visible by all Module methods.
:class:`~torch.nn.BufferDict` is an **ordered** dictionary that respects
* the order of insertion, and
* in :meth:`~torch.nn.BufferDict.update`, the order of the merged ``OrderedDict``
or another :class:`~torch.nn.BufferDict` (the argument to
:meth:`~torch.nn.BufferDict.update`).
Note that :meth:`~torch.nn.BufferDict.update` with other unordered mapping
types (e.g., Python's plain ``dict``) does not preserve the order of the
merged mapping.
Arguments:
buffers (iterable, optional): a mapping (dictionary) of
(string : :class:`~torch.Tensor`) or an iterable of key-value pairs
of type (string, :class:`~torch.Tensor`)
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.buffers = nn.BufferDict({
'left': torch.randn(5, 10),
'right': torch.randn(5, 10)
})
def forward(self, x, choice):
x = self.buffers[choice].mm(x)
return x
"""
def __init__(self, buffers=None):
super(BufferDict, self).__init__()
if buffers is not None:
self.update(buffers)
def __getitem__(self, key):
return self._buffers[key]
def __setitem__(self, key, buffer):
self.register_buffer(key, buffer)
def __delitem__(self, key):
del self._buffers[key]
def __len__(self):
return len(self._buffers)
def __iter__(self):
return iter(self._buffers.keys())
def __contains__(self, key):
return key in self._buffers
def clear(self):
"""Remove all items from the BufferDict."""
self._buffers.clear()
def pop(self, key):
r"""Remove key from the BufferDict and return its buffer.
Arguments:
key (string): key to pop from the BufferDict
"""
v = self[key]
del self[key]
return v
def keys(self):
r"""Return an iterable of the BufferDict keys."""
return self._buffers.keys()
def items(self):
r"""Return an iterable of the BufferDict key/value pairs."""
return self._buffers.items()
def values(self):
r"""Return an iterable of the BufferDict values."""
return self._buffers.values()
def update(self, buffers):
r"""Update the :class:`~torch.nn.BufferDict` with the key-value pairs from a
mapping or an iterable, overwriting existing keys.
.. note::
If :attr:`buffers` is an ``OrderedDict``, a :class:`~torch.nn.BufferDict`,
or an iterable of key-value pairs, the order of new elements in it is
preserved.
Arguments:
buffers (iterable): a mapping (dictionary) from string to
:class:`~torch.Tensor`, or an iterable of
key-value pairs of type (string, :class:`~torch.Tensor`)
"""
if not isinstance(buffers, container_abcs.Iterable):
raise TypeError(
"BuffersDict.update should be called with an "
"iterable of key/value pairs, but got " + type(buffers).__name__
)
if isinstance(buffers, container_abcs.Mapping):
if isinstance(buffers, (OrderedDict, BufferDict)):
for key, buffer in buffers.items():
self[key] = buffer
else:
for key, buffer in sorted(buffers.items()):
self[key] = buffer
else:
for j, p in enumerate(buffers):
if not isinstance(p, container_abcs.Iterable):
raise TypeError(
"BufferDict update sequence element "
"#" + str(j) + " should be Iterable; is" + type(p).__name__
)
if not len(p) == 2:
raise ValueError(
"BufferDict update sequence element "
"#" + str(j) + " has length " + str(len(p)) + "; 2 is required"
)
self[p[0]] = p[1]
def extra_repr(self):
child_lines = []
for k, p in self._buffers.items():
size_str = "x".join(str(size) for size in p.size())
device_str = "" if not p.is_cuda else " (GPU {})".format(p.get_device())
parastr = "Buffer containing: [{} of size {}{}]".format(
torch.typename(p), size_str, device_str
)
child_lines.append(" (" + k + "): " + parastr)
tmpstr = "\n".join(child_lines)
return tmpstr
def __call__(self, input):
raise RuntimeError("BufferDict should not be called.")
| [
"torch.typename"
] | 1.3 | SamuelMarks/botorch | 7801e2f56dc447322b2b6c92cab683d8900e4c7f |
1.3 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class TestMock(BotorchTestCase):
def test_MockPosterior(self):
# test basic logic
mp = MockPosterior()
self.assertEqual(mp.device.type, "cpu")
self.assertEqual(mp.dtype, torch.float32)
self.assertEqual(mp.event_shape, torch.Size())
self.assertEqual(
MockPosterior(variance=torch.rand(2)).event_shape, torch.Size([2])
)
# test passing in tensors
mean = torch.rand(2)
variance = torch.eye(2)
samples = torch.rand(1, 2)
mp = MockPosterior(mean=mean, variance=variance, samples=samples)
self.assertEqual(mp.device.type, "cpu")
self.assertEqual(mp.dtype, torch.float32)
self.assertTrue(torch.equal(mp.mean, mean))
self.assertTrue(torch.equal(mp.variance, variance))
self.assertTrue(torch.all(mp.sample() == samples.unsqueeze(0)))
self.assertTrue(
torch.all(mp.sample(torch.Size([2])) == samples.repeat(2, 1, 1))
)
with self.assertRaises(RuntimeError):
mp.sample(sample_shape=torch.Size([2]), base_samples=torch.rand(3))
def test_MockModel(self):
mp = MockPosterior()
mm = MockModel(mp)
X = torch.empty(0)
self.assertEqual(mm.posterior(X), mp)
self.assertEqual(mm.num_outputs, 0)
mm.state_dict()
mm.load_state_dict()
| [
"torch.Size",
"torch.rand",
"torch.equal",
"torch.eye",
"torch.empty"
] | 1.3 | SamuelMarks/botorch | 7801e2f56dc447322b2b6c92cab683d8900e4c7f |
1.3 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import torch
from botorch.cross_validation import batch_cross_validation, gen_loo_cv_folds
from botorch.exceptions.warnings import OptimizationWarning
from botorch.models.gp_regression import FixedNoiseGP, SingleTaskGP
from botorch.utils.testing import BotorchTestCase, _get_random_data
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
class TestFitBatchCrossValidation(BotorchTestCase):
def test_single_task_batch_cv(self):
n = 10
for batch_shape, num_outputs, dtype in itertools.product(
(torch.Size(), torch.Size([2])), (1, 2), (torch.float, torch.double)
):
tkwargs = {"device": self.device, "dtype": dtype}
train_X, train_Y = _get_random_data(
batch_shape=batch_shape, num_outputs=num_outputs, n=n, **tkwargs
)
if num_outputs == 1:
train_Y = train_Y.squeeze(-1)
train_Yvar = torch.full_like(train_Y, 0.01)
noiseless_cv_folds = gen_loo_cv_folds(train_X=train_X, train_Y=train_Y)
# check shapes
expected_shape_train_X = batch_shape + torch.Size(
[n, n - 1, train_X.shape[-1]]
)
expected_shape_test_X = batch_shape + torch.Size([n, 1, train_X.shape[-1]])
self.assertEqual(noiseless_cv_folds.train_X.shape, expected_shape_train_X)
self.assertEqual(noiseless_cv_folds.test_X.shape, expected_shape_test_X)
expected_shape_train_Y = batch_shape + torch.Size([n, n - 1, num_outputs])
expected_shape_test_Y = batch_shape + torch.Size([n, 1, num_outputs])
self.assertEqual(noiseless_cv_folds.train_Y.shape, expected_shape_train_Y)
self.assertEqual(noiseless_cv_folds.test_Y.shape, expected_shape_test_Y)
self.assertIsNone(noiseless_cv_folds.train_Yvar)
self.assertIsNone(noiseless_cv_folds.test_Yvar)
# Test SingleTaskGP
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
cv_results = batch_cross_validation(
model_cls=SingleTaskGP,
mll_cls=ExactMarginalLogLikelihood,
cv_folds=noiseless_cv_folds,
fit_args={"options": {"maxiter": 1}},
)
expected_shape = batch_shape + torch.Size([n, 1, num_outputs])
self.assertEqual(cv_results.posterior.mean.shape, expected_shape)
self.assertEqual(cv_results.observed_Y.shape, expected_shape)
# Test FixedNoiseGP
noisy_cv_folds = gen_loo_cv_folds(
train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar
)
# check shapes
self.assertEqual(noisy_cv_folds.train_X.shape, expected_shape_train_X)
self.assertEqual(noisy_cv_folds.test_X.shape, expected_shape_test_X)
self.assertEqual(noisy_cv_folds.train_Y.shape, expected_shape_train_Y)
self.assertEqual(noisy_cv_folds.test_Y.shape, expected_shape_test_Y)
self.assertEqual(noisy_cv_folds.train_Yvar.shape, expected_shape_train_Y)
self.assertEqual(noisy_cv_folds.test_Yvar.shape, expected_shape_test_Y)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=OptimizationWarning)
cv_results = batch_cross_validation(
model_cls=FixedNoiseGP,
mll_cls=ExactMarginalLogLikelihood,
cv_folds=noisy_cv_folds,
fit_args={"options": {"maxiter": 1}},
)
self.assertEqual(cv_results.posterior.mean.shape, expected_shape)
self.assertEqual(cv_results.observed_Y.shape, expected_shape)
| [
"torch.Size",
"torch.full_like"
] | 1.3 | SamuelMarks/botorch | 7801e2f56dc447322b2b6c92cab683d8900e4c7f |
3 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
# acting on behalf of its Max Planck Institute for Intelligent Systems and the
# Max Planck Institute for Biological Cybernetics. All rights reserved.
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
# on this computer program. You can only use this computer program if you have closed a license agreement
# with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and liable to prosecution.
# Contact: [email protected]
#
#
# If you use this code in a research publication please consider citing the following:
#
# Expressive Body Capture: 3D Hands, Face, and Body from a Single Image <https://arxiv.org/abs/1904.05866>
#
#
# Code Developed by:
# Nima Ghorbani <https://nghorbani.github.io/>
#
# 2020.12.12
def pyrenderer(imw=2048, imh=2048):
from body_visualizer.mesh.mesh_viewer import MeshViewer
import cv2
import numpy as np
import trimesh
try:
mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
except:
import os
os.environ['PYOPENGL_PLATFORM'] = 'egl'
os.environ['EGL_DEVICE_ID'] = os.environ['GPU_DEVICE_ORDINAL'].split(',')[0]
mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
mv.set_cam_trans([0, -0.5, 2.])
def render_an_image(meshes):
n_all = len(meshes)
nc = int(np.sqrt(n_all))
out_image = np.zeros([1, 1, 1, mv.width, mv.height, 4])
scale_percent = 100./nc
width = int(mv.width * scale_percent / 100)
height = int(mv.height * scale_percent / 100)
dim = (width, height)
for rId in range(nc):
for cId in range(nc):
i = (nc*rId) + cId
if i >= len(meshes): break  # >= guards against an index overrun when the grid has more cells than meshes
mesh = meshes[i]
# mesh.apply_transform(trimesh.transformations.rotation_matrix(np.radians(-90), (1, 0, 0)))
mesh.vertices -= np.median(np.array(mesh.vertices), axis=0)
mv.set_dynamic_meshes([mesh])
img = mv.render(render_wireframe=False, RGBA=True)
img_resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
out_image[0, 0, 0, (rId*width):((rId+1)*width), (cId*height):((cId+1)*height)] = cv2.cvtColor(img_resized, cv2.COLOR_BGRA2RGBA)
return out_image.astype(np.uint8)
return render_an_image
def vposer_trainer_renderer(bm, num_bodies_to_display=5):
import numpy as np
import trimesh
import torch
from body_visualizer.tools.vis_tools import imagearray2file, colors
from human_body_prior.tools.omni_tools import copy2cpu as c2c
from human_body_prior.tools.omni_tools import makepath
from trimesh import Trimesh as Mesh
from trimesh.util import concatenate as mesh_cat
renderer = pyrenderer(1024, 1024)
faces = c2c(bm.f)
def render_once(body_parms, body_colors=[colors['grey'], colors['brown-light']], out_fname=None):
'''
:param body_parms: list of dictionaries of body parameters.
:param body_colors: list of np arrays of color rgb values
:param movie_outpath: a mp4 path
:return:
'''
if out_fname is not None: makepath(out_fname, isfile=True)
assert len(body_parms) <= len(body_colors), ValueError('Not enough colors provided for #{} body_parms'.format(len(body_parms)))
bs = body_parms[0]['pose_body'].shape[0]
body_ids = np.random.choice(bs, num_bodies_to_display)
body_evals = [c2c(bm(root_orient=v['root_orient'].view(bs, -1) if 'root_orient' in v else torch.zeros(bs, 3).type_as(v['pose_body']),
pose_body=v['pose_body'].contiguous().view(bs, -1)).v) for v in body_parms]
num_verts = body_evals[0].shape[1]
render_meshes = []
for bId in body_ids:
concat_cur_meshes = None
for body, body_color in zip(body_evals, body_colors):
cur_body_mesh = Mesh(body[bId], faces, vertex_colors=np.ones([num_verts, 3]) * body_color)
concat_cur_meshes = cur_body_mesh if concat_cur_meshes is None else mesh_cat(concat_cur_meshes, cur_body_mesh)
render_meshes.append(concat_cur_meshes)
img = renderer(render_meshes)
if out_fname is not None: imagearray2file(img, out_fname, fps=10)
return
return render_once
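# Hedged usage sketch (the body model `bm`, pose tensors, and output filename are assumptions):
# >>> render_once = vposer_trainer_renderer(bm, num_bodies_to_display=5)
# >>> render_once([{'pose_body': gt_pose}, {'pose_body': rec_pose}], out_fname='progress.png')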
| [
"torch.zeros"
] | 3 | zokin/human_body_prior | 0278cb45180992e4d39ba1a11601f5ecc53ee148 |