version (stringclasses, 21 values) | code (stringlengths, 225-174k) | apis (sequence) | full_version (stringlengths, 1-6) | repo_name (stringlengths, 10-107) | hexsha (stringlengths, 40-40)
---|---|---|---|---|---|
1.8 | from torch import nn, optim
from torch.utils import data
from pytorch_lightning import Trainer
from asteroid.engine.system import System
from asteroid.utils.test_utils import DummyDataset
from asteroid.engine.schedulers import NoamScheduler, DPTNetScheduler
def common_setup():
model = nn.Sequential(nn.Linear(10, 10), nn.ReLU())
optimizer = optim.Adam(model.parameters(), lr=1e-3)
dataset = DummyDataset()
loader = data.DataLoader(dataset, batch_size=2, num_workers=4)
trainer = Trainer(max_epochs=1, fast_dev_run=True)
return model, optimizer, loader, trainer
def test_state_dict():
""" Load and serialize scheduler. """
model, optimizer, loader, trainer = common_setup()
sched = NoamScheduler(optimizer, d_model=10, warmup_steps=100)
state_dict = sched.state_dict()
sched.load_state_dict(state_dict)
state_dict_c = sched.state_dict()
assert state_dict == state_dict_c
# Test zero_grad
sched.zero_grad()
def test_noam_scheduler():
model, optimizer, loader, trainer = common_setup()
scheduler = {
"scheduler": NoamScheduler(optimizer, d_model=10, warmup_steps=100),
"interval": "step",
}
system = System(
model,
optimizer,
loss_func=nn.MSELoss(),
train_loader=loader,
val_loader=loader,
scheduler=scheduler,
)
trainer.fit(system)
# Test `as_tensor` for `plot`
scheduler["scheduler"].as_tensor()
def test_dptnet_scheduler():
model, optimizer, loader, trainer = common_setup()
scheduler = {
"scheduler": DPTNetScheduler(optimizer, d_model=10, steps_per_epoch=6, warmup_steps=4),
"interval": "step",
}
system = System(
model,
optimizer,
loss_func=nn.MSELoss(),
train_loader=loader,
val_loader=loader,
scheduler=scheduler,
)
trainer.fit(system)
# Test `as_tensor` for `plot`
scheduler["scheduler"].as_tensor()
| [
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"torch.nn.ReLU"
] | 1.8.0 | ldelebec/asteroid | d6390baca5409634f112ceed554ea66c4054cb54 |
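For reference, the warmup rule that a Noam-style scheduler follows can be written as `lr = d_model^{-0.5} * min(step^{-0.5}, step * warmup_steps^{-1.5})`. A minimal stand-alone sketch of that rule (an illustration only, not the Asteroid `NoamScheduler` implementation; the function name `noam_lr` is made up) could look like:

```python
def noam_lr(step: int, d_model: int = 10, warmup_steps: int = 100, base_lr: float = 1.0) -> float:
    """Noam rule: linear warmup for `warmup_steps` steps, then inverse-sqrt decay."""
    step = max(step, 1)  # guard against step 0
    scale = d_model ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)
    return base_lr * scale

# The peak learning rate is reached at step == warmup_steps.
print(noam_lr(1), noam_lr(100), noam_lr(10_000))
```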
1.8 | from itertools import permutations
import torch
from torch import nn
from scipy.optimize import linear_sum_assignment
class PITLossWrapper(nn.Module):
r"""Permutation invariant loss wrapper.
Args:
loss_func: function with signature (est_targets, targets, **kwargs).
pit_from (str): Determines how PIT is applied.
* ``'pw_mtx'`` (pairwise matrix): `loss_func` computes pairwise
losses and returns a torch.Tensor of shape
:math:`(batch, n\_src, n\_src)`. Each element
:math:`(batch, i, j)` corresponds to the loss between
:math:`targets[:, i]` and :math:`est\_targets[:, j]`
* ``'pw_pt'`` (pairwise point): `loss_func` computes the loss for
a batch of single source and single estimates (tensors won't
have the source axis). Output shape : :math:`(batch)`.
See :meth:`~PITLossWrapper.get_pw_losses`.
* ``'perm_avg'`` (permutation average): `loss_func` computes the
average loss for a given permutations of the sources and
estimates. Output shape : :math:`(batch)`.
See :meth:`~PITLossWrapper.best_perm_from_perm_avg_loss`.
In terms of efficiency, ``'perm_avg'`` is the least efficient.
perm_reduce (Callable): torch function to reduce permutation losses.
Defaults to None (equivalent to mean). Signature of the func
(pwl_set, **kwargs) : :math:`(B, n\_src!, n\_src) --> (B, n\_src!)`.
`perm_reduce` can receive **kwargs during forward using the
`reduce_kwargs` argument (dict). If those arguments are static,
consider defining a small function or using `functools.partial`.
Only used in `'pw_mtx'` and `'pw_pt'` `pit_from` modes.
For each of these modes, the best permutation and reordering will be
automatically computed. When either ``'pw_mtx'`` or ``'pw_pt'`` is used,
and the number of sources is larger than three, the Hungarian algorithm is
used to find the best permutation.
Examples
>>> import torch
>>> from asteroid.losses import pairwise_neg_sisdr
>>> sources = torch.randn(10, 3, 16000)
>>> est_sources = torch.randn(10, 3, 16000)
>>> # Compute PIT loss based on pairwise losses
>>> loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
>>> loss_val = loss_func(est_sources, sources)
>>>
>>> # Using reduce
>>> def reduce(perm_loss, src):
>>> weighted = perm_loss * src.norm(dim=-1, keepdim=True)
>>> return torch.mean(weighted, dim=-1)
>>>
>>> loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx',
>>> perm_reduce=reduce)
>>> reduce_kwargs = {'src': sources}
>>> loss_val = loss_func(est_sources, sources,
>>> reduce_kwargs=reduce_kwargs)
"""
def __init__(self, loss_func, pit_from="pw_mtx", perm_reduce=None):
super().__init__()
self.loss_func = loss_func
self.pit_from = pit_from
self.perm_reduce = perm_reduce
if self.pit_from not in ["pw_mtx", "pw_pt", "perm_avg"]:
raise ValueError(
"Unsupported loss function type for now. Expected"
"one of [`pw_mtx`, `pw_pt`, `perm_avg`]"
)
def forward(self, est_targets, targets, return_est=False, reduce_kwargs=None, **kwargs):
r"""Find the best permutation and return the loss.
Args:
est_targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
The batch of target estimates.
targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
The batch of training targets
return_est: Boolean. Whether to return the reordered targets
estimates (To compute metrics or to save example).
reduce_kwargs (dict or None): kwargs that will be passed to the
pairwise losses reduce function (`perm_reduce`).
**kwargs: additional keyword argument that will be passed to the
loss function.
Returns:
- Best permutation loss for each batch sample, average over
the batch.
- The reordered targets estimates if ``return_est`` is True.
:class:`torch.Tensor` of shape $(batch, nsrc, ...)$.
"""
n_src = targets.shape[1]
assert n_src < 10, f"Expected source axis along dim 1, found {n_src}"
if self.pit_from == "pw_mtx":
# Loss function already returns pairwise losses
pw_losses = self.loss_func(est_targets, targets, **kwargs)
elif self.pit_from == "pw_pt":
# Compute pairwise losses with a for loop.
pw_losses = self.get_pw_losses(self.loss_func, est_targets, targets, **kwargs)
elif self.pit_from == "perm_avg":
# Cannot get pairwise losses from this type of loss.
# Find best permutation directly.
min_loss, batch_indices = self.best_perm_from_perm_avg_loss(
self.loss_func, est_targets, targets, **kwargs
)
# Take the mean over the batch
mean_loss = torch.mean(min_loss)
if not return_est:
return mean_loss
reordered = self.reorder_source(est_targets, batch_indices)
return mean_loss, reordered
else:
return
assert pw_losses.ndim == 3, (
"Something went wrong with the loss " "function, please read the docs."
)
assert pw_losses.shape[0] == targets.shape[0], "PIT loss needs same batch dim as input"
reduce_kwargs = reduce_kwargs if reduce_kwargs is not None else dict()
min_loss, batch_indices = self.find_best_perm(
pw_losses, perm_reduce=self.perm_reduce, **reduce_kwargs
)
mean_loss = torch.mean(min_loss)
if not return_est:
return mean_loss
reordered = self.reorder_source(est_targets, batch_indices)
return mean_loss, reordered
@staticmethod
def get_pw_losses(loss_func, est_targets, targets, **kwargs):
r"""Get pair-wise losses between the training targets and its estimate
for a given loss function.
Args:
loss_func: function with signature (est_targets, targets, **kwargs)
The loss function to get pair-wise losses from.
est_targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
The batch of target estimates.
targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
The batch of training targets.
**kwargs: additional keyword argument that will be passed to the
loss function.
Returns:
torch.Tensor of size $(batch, nsrc, nsrc)$, losses computed for
all permutations of the targets and est_targets.
This function can be called on a loss function which returns a tensor
of size :math:`(batch)`. There are more efficient ways to compute pair-wise
losses using broadcasting.
"""
batch_size, n_src, *_ = targets.shape
pair_wise_losses = targets.new_empty(batch_size, n_src, n_src)
for est_idx, est_src in enumerate(est_targets.transpose(0, 1)):
for target_idx, target_src in enumerate(targets.transpose(0, 1)):
pair_wise_losses[:, est_idx, target_idx] = loss_func(est_src, target_src, **kwargs)
return pair_wise_losses
@staticmethod
def best_perm_from_perm_avg_loss(loss_func, est_targets, targets, **kwargs):
r"""Find best permutation from loss function with source axis.
Args:
loss_func: function with signature $(est_targets, targets, **kwargs)$
The loss function to get batch losses from.
est_targets: torch.Tensor. Expected shape $(batch, nsrc, *)$.
The batch of target estimates.
targets: torch.Tensor. Expected shape $(batch, nsrc, *)$.
The batch of training targets.
**kwargs: additional keyword argument that will be passed to the
loss function.
Returns:
- :class:`torch.Tensor`:
The loss corresponding to the best permutation of size $(batch,)$.
- :class:`torch.Tensor`:
The indices of the best permutations.
"""
n_src = targets.shape[1]
perms = torch.tensor(list(permutations(range(n_src))), dtype=torch.long)
loss_set = torch.stack(
[loss_func(est_targets[:, perm], targets, **kwargs) for perm in perms], dim=1
)
# Indexes and values of min losses for each batch element
min_loss, min_loss_idx = torch.min(loss_set, dim=1)
# Permutation indices for each batch.
batch_indices = torch.stack([perms[m] for m in min_loss_idx], dim=0)
return min_loss, batch_indices
@staticmethod
def find_best_perm(pair_wise_losses, perm_reduce=None, **kwargs):
r"""Find the best permutation, given the pair-wise losses.
Dispatch between the factorial method if the number of sources is small (<= 3)
and the Hungarian method for more sources. If ``perm_reduce`` is not None,
the factorial method is always used.
Args:
pair_wise_losses (:class:`torch.Tensor`):
Tensor of shape :math:`(batch, n\_src, n\_src)`. Pairwise losses.
perm_reduce (Callable): torch function to reduce permutation losses.
Defaults to None (equivalent to mean). Signature of the func
(pwl_set, **kwargs) : :math:`(B, n\_src!, n\_src) -> (B, n\_src!)`
**kwargs: additional keyword argument that will be passed to the
permutation reduce function.
Returns:
- :class:`torch.Tensor`:
The loss corresponding to the best permutation of size $(batch,)$.
- :class:`torch.Tensor`:
The indices of the best permutations.
"""
n_src = pair_wise_losses.shape[-1]
if perm_reduce is not None or n_src <= 3:
min_loss, batch_indices = PITLossWrapper.find_best_perm_factorial(
pair_wise_losses, perm_reduce=perm_reduce, **kwargs
)
else:
min_loss, batch_indices = PITLossWrapper.find_best_perm_hungarian(pair_wise_losses)
return min_loss, batch_indices
@staticmethod
def reorder_source(source, batch_indices):
r"""Reorder sources according to the best permutation.
Args:
source (torch.Tensor): Tensor of shape :math:`(batch, n_src, time)`
batch_indices (torch.Tensor): Tensor of shape :math:`(batch, n_src)`.
Contains optimal permutation indices for each batch.
Returns:
:class:`torch.Tensor`: Reordered sources.
"""
reordered_sources = torch.stack(
[torch.index_select(s, 0, b) for s, b in zip(source, batch_indices)]
)
return reordered_sources
@staticmethod
def find_best_perm_factorial(pair_wise_losses, perm_reduce=None, **kwargs):
r"""Find the best permutation given the pair-wise losses by looping
through all the permutations.
Args:
pair_wise_losses (:class:`torch.Tensor`):
Tensor of shape :math:`(batch, n_src, n_src)`. Pairwise losses.
perm_reduce (Callable): torch function to reduce permutation losses.
Defaults to None (equivalent to mean). Signature of the func
(pwl_set, **kwargs) : :math:`(B, n\_src!, n\_src) -> (B, n\_src!)`
**kwargs: additional keyword argument that will be passed to the
permutation reduce function.
Returns:
- :class:`torch.Tensor`:
The loss corresponding to the best permutation of size $(batch,)$.
- :class:`torch.Tensor`:
The indices of the best permutations.
MIT Copyright (c) 2018 Kaituo XU.
See `Original code
<https://github.com/kaituoxu/Conv-TasNet/blob/master>`__ and `License
<https://github.com/kaituoxu/Conv-TasNet/blob/master/LICENSE>`__.
"""
n_src = pair_wise_losses.shape[-1]
# After transposition, dim 1 corresp. to sources and dim 2 to estimates
pwl = pair_wise_losses.transpose(-1, -2)
perms = pwl.new_tensor(list(permutations(range(n_src))), dtype=torch.long)
# Column permutation indices
idx = torch.unsqueeze(perms, 2)
# Loss mean of each permutation
if perm_reduce is None:
# one-hot, [n_src!, n_src, n_src]
perms_one_hot = pwl.new_zeros((*perms.size(), n_src)).scatter_(2, idx, 1)
loss_set = torch.einsum("bij,pij->bp", [pwl, perms_one_hot])
loss_set /= n_src
else:
# batch = pwl.shape[0]; n_perm = idx.shape[0]
# [batch, n_src!, n_src] : Pairwise losses for each permutation.
pwl_set = pwl[:, torch.arange(n_src), idx.squeeze(-1)]
# Apply reduce [batch, n_src!, n_src] --> [batch, n_src!]
loss_set = perm_reduce(pwl_set, **kwargs)
# Indexes and values of min losses for each batch element
min_loss, min_loss_idx = torch.min(loss_set, dim=1)
# Permutation indices for each batch.
batch_indices = torch.stack([perms[m] for m in min_loss_idx], dim=0)
return min_loss, batch_indices
@staticmethod
def find_best_perm_hungarian(pair_wise_losses: torch.Tensor):
"""
Find the best permutation given the pair-wise losses, using the Hungarian algorithm.
Returns:
- :class:`torch.Tensor`:
The loss corresponding to the best permutation of size (batch,).
- :class:`torch.Tensor`:
The indices of the best permutations.
"""
# After transposition, dim 1 corresp. to sources and dim 2 to estimates
pwl = pair_wise_losses.transpose(-1, -2)
# Just bring the numbers to cpu(), not the graph
pwl_copy = pwl.detach().cpu()
# Loop over batch + row indices are always ordered for square matrices.
batch_indices = torch.tensor([linear_sum_assignment(pwl)[1] for pwl in pwl_copy]).to(
pwl.device
)
min_loss = torch.gather(pwl, 2, batch_indices[..., None]).mean([-1, -2])
return min_loss, batch_indices
class PITReorder(PITLossWrapper):
"""Permutation invariant reorderer. Only returns the reordered estimates.
See :py:class:`asteroid.losses.PITLossWrapper`."""
def forward(self, est_targets, targets, reduce_kwargs=None, **kwargs):
_, reordered = super().forward(
est_targets=est_targets,
targets=targets,
return_est=True,
reduce_kwargs=reduce_kwargs,
**kwargs,
)
return reordered
| [
"torch.stack",
"torch.min",
"torch.einsum",
"torch.gather",
"torch.arange",
"torch.unsqueeze",
"torch.index_select",
"torch.mean"
] | 1.8.0 | ldelebec/asteroid | d6390baca5409634f112ceed554ea66c4054cb54 |
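For intuition, the permutation search that `PITLossWrapper` performs can be reproduced with a naive loop over all permutations. The sketch below uses a plain per-pair MSE as a stand-in loss and only illustrates the idea; it is not the optimized einsum/Hungarian paths above:

```python
from itertools import permutations

import torch


def naive_pit_mse(est, ref):
    """Brute-force PIT: try every source permutation, keep the cheapest, average over the batch.

    est, ref: tensors of shape (batch, n_src, time).
    """
    batch, n_src, _ = ref.shape
    best = torch.full((batch,), float("inf"))
    for perm in permutations(range(n_src)):
        # Per-batch-element mean squared error for this source ordering.
        loss = ((est[:, list(perm)] - ref) ** 2).mean(dim=(1, 2))
        best = torch.minimum(best, loss)
    return best.mean()


est = torch.randn(4, 3, 1600)
ref = torch.randn(4, 3, 1600)
print(naive_pit_mse(est, ref))
```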
1.8 | import torch
from torch.utils.data._utils.collate import default_collate
def online_mixing_collate(batch):
"""Mix target sources to create new mixtures.
The default collate function is expected to return two objects:
inputs and targets.
"""
# Inputs (batch, time) / targets (batch, n_src, time)
inputs, targets = default_collate(batch)
batch, n_src, _ = targets.shape
energies = torch.sum(targets ** 2, dim=-1, keepdim=True)
new_src = []
for i in range(targets.shape[1]):
new_s = targets[torch.randperm(batch), i, :]
new_s = new_s * torch.sqrt(energies[:, i] / (new_s ** 2).sum(-1, keepdims=True))
new_src.append(new_s)
targets = torch.stack(new_src, dim=1)
inputs = targets.sum(1)
return inputs, targets
| [
"torch.randperm",
"torch.utils.data._utils.collate.default_collate",
"torch.stack",
"torch.sum"
] | 1.8.0 | ldelebec/asteroid | d6390baca5409634f112ceed554ea66c4054cb54 |
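A hedged usage sketch of `online_mixing_collate`: `PairDataset` below is a made-up toy dataset returning `(mixture, sources)` pairs, not part of Asteroid, and serves only to show where the collate function plugs into a `DataLoader`:

```python
import torch
from torch.utils.data import Dataset, DataLoader


class PairDataset(Dataset):
    """Toy dataset yielding (mixture, sources) pairs of fixed length."""

    def __len__(self):
        return 16

    def __getitem__(self, idx):
        sources = torch.randn(2, 8000)      # (n_src, time)
        return sources.sum(0), sources      # mixture is the sum of the sources


loader = DataLoader(PairDataset(), batch_size=4, collate_fn=online_mixing_collate)
inputs, targets = next(iter(loader))
print(inputs.shape, targets.shape)          # torch.Size([4, 8000]) torch.Size([4, 2, 8000])
```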
1.5 | """Utility code for running native pytorch distributed"""
import os
import torch.distributed as dist
def init_workers_file():
rank = int(os.environ['SLURM_PROCID'])
n_ranks = int(os.environ['SLURM_NTASKS'])
sync_file = 'file:///tmp/%s_%s_pytorch_sync' % (
os.environ['USER'], os.environ['SLURM_JOB_ID'])
dist.init_process_group(backend='nccl', world_size=n_ranks, rank=rank,
init_method=sync_file)
return rank, n_ranks
def init_workers_mpi():
dist.init_process_group(backend='mpi')
rank = dist.get_rank()
n_ranks = dist.get_world_size()
return rank, n_ranks
| [
"torch.distributed.init_process_group",
"torch.distributed.get_world_size",
"torch.distributed.get_rank"
] | 1.5.0 | caditi97/exatrkx-ctd2020 | ed090ddfcc9e2e623fb45000fca71d5ad6ccf3b9 |
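One way the returned `(rank, n_ranks)` pair is typically consumed is to wrap a model in `DistributedDataParallel` and shard the data with a `DistributedSampler`. The sketch below assumes the SLURM environment variables above are set and a GPU is visible to each rank; the linear model and random dataset are placeholders:

```python
import torch
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

rank, n_ranks = init_workers_file()  # or init_workers_mpi()

# Pin each rank to a local GPU (assumes round-robin mapping of ranks to visible GPUs).
device = torch.device('cuda', rank % max(torch.cuda.device_count(), 1))
model = nn.Linear(16, 1).to(device)
model = DistributedDataParallel(model, device_ids=[device.index])

dataset = TensorDataset(torch.randn(128, 16), torch.randn(128, 1))
sampler = DistributedSampler(dataset, num_replicas=n_ranks, rank=rank)
loader = DataLoader(dataset, batch_size=8, sampler=sampler)
```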
1.0 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union
import torch
from torch.nn.utils.rnn import pad_sequence
from ..file_utils import PaddingStrategy
from ..modeling_utils import PreTrainedModel
from ..models.bert import BertTokenizer, BertTokenizerFast
from ..tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase
InputDataClass = NewType("InputDataClass", Any)
"""
A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a dictionary
of Tensors.
"""
DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, torch.Tensor]])
def default_data_collator(features: List[InputDataClass]) -> Dict[str, torch.Tensor]:
"""
Very simple data collator that simply collates batches of dict-like objects and performs special handling for
potential keys named:
- ``label``: handles a single value (int or float) per object
- ``label_ids``: handles a list of values per object
Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
to the model. See glue and ner for examples of how it's useful.
"""
# In this function we'll make the assumption that all `features` in the batch
# have the same attributes.
# So we will look at the first element as a proxy for what attributes exist
# on the whole batch.
if not isinstance(features[0], (dict, BatchEncoding)):
features = [vars(f) for f in features]
first = features[0]
batch = {}
# Special handling for labels.
# Ensure that tensor is created with the correct type
# (it should be automatically the case, but let's make sure of it.)
if "label" in first and first["label"] is not None:
label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
dtype = torch.long if isinstance(label, int) else torch.float
batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
elif "label_ids" in first and first["label_ids"] is not None:
if isinstance(first["label_ids"], torch.Tensor):
batch["labels"] = torch.stack([f["label_ids"] for f in features])
else:
dtype = torch.long if type(first["label_ids"][0]) is int else torch.float
batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)
# Handling of all other possible keys.
# Again, we will use the first element to figure out which key/values are not None for this model.
for k, v in first.items():
if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
if isinstance(v, torch.Tensor):
batch[k] = torch.stack([f[k] for f in features])
else:
batch[k] = torch.tensor([f[k] for f in features])
return batch
@dataclass
class DataCollatorWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
if "label" in batch:
batch["labels"] = batch["label"]
del batch["label"]
if "label_ids" in batch:
batch["labels"] = batch["label_ids"]
del batch["label_ids"]
return batch
@dataclass
class DataCollatorForTokenClassification:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
def __call__(self, features):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
batch = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
# Conversion to tensors will fail if we have labels as they are not of the same length yet.
return_tensors="pt" if labels is None else None,
)
if labels is None:
return batch
sequence_length = torch.tensor(batch["input_ids"]).shape[1]
padding_side = self.tokenizer.padding_side
if padding_side == "right":
batch["labels"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels]
else:
batch["labels"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels]
batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
return batch
def _collate_batch(examples, tokenizer, pad_to_multiple_of: Optional[int] = None):
"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""
# Tensorize if necessary.
if isinstance(examples[0], (list, tuple)):
examples = [torch.tensor(e, dtype=torch.long) for e in examples]
# Check if padding is necessary.
length_of_first = examples[0].size(0)
are_tensors_same_length = all(x.size(0) == length_of_first for x in examples)
if are_tensors_same_length and (pad_to_multiple_of is None or length_of_first % pad_to_multiple_of == 0):
return torch.stack(examples, dim=0)
# If yes, check if we have a `pad_token`.
if tokenizer._pad_token is None:
raise ValueError(
"You are attempting to pad samples but the tokenizer you are using"
f" ({tokenizer.__class__.__name__}) does not have a pad token."
)
# Creating the full tensor and filling it with our data.
max_length = max(x.size(0) for x in examples)
if pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id)
for i, example in enumerate(examples):
if tokenizer.padding_side == "right":
result[i, : example.shape[0]] = example
else:
result[i, -example.shape[0] :] = example
return result
def tolist(x: Union[List[Any], torch.Tensor]):
return x.tolist() if isinstance(x, torch.Tensor) else x
@dataclass
class DataCollatorForSeq2Seq:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
model (:class:`~transformers.PreTrainedModel`):
The model that is being trained. If set and the model has a `prepare_decoder_input_ids_from_labels`
method, it is used to prepare the `decoder_input_ids`.
This is useful when using `label_smoothing` to avoid calculating loss twice.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[PreTrainedModel] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
def __call__(self, features):
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
padding_side = self.tokenizer.padding_side
for feature in features:
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
features = self.tokenizer.pad(
features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
# prepare decoder_input_ids
if self.model is not None and hasattr(self.model, "prepare_decoder_input_ids_from_labels"):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
features["decoder_input_ids"] = decoder_input_ids
return features
@dataclass
class DataCollatorForLanguageModeling:
"""
Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
are not all of the same length.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
mlm (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use masked language modeling. If set to :obj:`False`, the labels are the same as the
inputs with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for
non-masked tokens and the value to predict for the masked token.
mlm_probability (:obj:`float`, `optional`, defaults to 0.15):
The probability with which to (randomly) mask tokens in the input, when :obj:`mlm` is set to :obj:`True`.
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
.. note::
For best performance, this data collator should be used with a dataset having items that are dictionaries or
BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a
:class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the
argument :obj:`return_special_tokens_mask=True`.
"""
tokenizer: PreTrainedTokenizerBase
mlm: bool = True
mlm_probability: float = 0.15
pad_to_multiple_of: Optional[int] = None
def __post_init__(self):
if self.mlm and self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. "
"You should pass `mlm=False` to train on causal language modeling instead."
)
def __call__(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
# Handle dict or lists with proper padding and conversion to tensor.
if isinstance(examples[0], (dict, BatchEncoding)):
batch = self.tokenizer.pad(examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of)
else:
batch = {"input_ids": _collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)}
# If special token mask has been preprocessed, pop it from the dict.
special_tokens_mask = batch.pop("special_tokens_mask", None)
if self.mlm:
batch["input_ids"], batch["labels"] = self.mask_tokens(
batch["input_ids"], special_tokens_mask=special_tokens_mask
)
else:
labels = batch["input_ids"].clone()
if self.tokenizer.pad_token_id is not None:
labels[labels == self.tokenizer.pad_token_id] = -100
batch["labels"] = labels
return batch
def mask_tokens(
self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
if special_tokens_mask is None:
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
"""
Data collator used for language modeling that masks entire words.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for masked language modeling
.. note::
This collator relies on details of the implementation of subword tokenization by
:class:`~transformers.BertTokenizer`, specifically that subword tokens are prefixed with `##`. For tokenizers
that do not adhere to this scheme, this collator will produce an output that is roughly equivalent to
:class:`.DataCollatorForLanguageModeling`.
"""
def __call__(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
if isinstance(examples[0], (dict, BatchEncoding)):
input_ids = [e["input_ids"] for e in examples]
else:
input_ids = examples
examples = [{"input_ids": e} for e in examples]
batch_input = _collate_batch(input_ids, self.tokenizer)
mask_labels = []
for e in examples:
ref_tokens = []
for id in tolist(e["input_ids"]):
token = self.tokenizer._convert_id_to_token(id)
ref_tokens.append(token)
# For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
if "chinese_ref" in e:
ref_pos = tolist(e["chinese_ref"])
len_seq = len(e["input_ids"])
for i in range(len_seq):
if i in ref_pos:
ref_tokens[i] = "##" + ref_tokens[i]
mask_labels.append(self._whole_word_mask(ref_tokens))
batch_mask = _collate_batch(mask_labels, self.tokenizer)
inputs, labels = self.mask_tokens(batch_input, batch_mask)
return {"input_ids": inputs, "labels": labels}
def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
"""
Get 0/1 labels for masked tokens with whole word mask proxy
"""
if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
warnings.warn(
"DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers."
"Please refer to the documentation for more information."
)
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if token == "[CLS]" or token == "[SEP]":
continue
if len(cand_indexes) >= 1 and token.startswith("##"):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
random.shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
assert len(covered_indexes) == len(masked_lms)
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
return mask_labels
def mask_tokens(self, inputs: torch.Tensor, mask_labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
Setting 'mask_labels' means we use whole word masking (wwm): indices are masked directly according to their reference.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = mask_labels
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = probability_matrix.bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
"""
Data collator used for sentence order prediction task.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for both masked language modeling and sentence order prediction
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
"DataCollatorForLanguageModeling instead.",
FutureWarning,
)
def __call__(self, examples: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
input_ids = [example["input_ids"] for example in examples]
input_ids = _collate_batch(input_ids, self.tokenizer)
input_ids, labels, attention_mask = self.mask_tokens(input_ids)
token_type_ids = [example["token_type_ids"] for example in examples]
# the size of segment_ids varies because of randomness, so pad at the end as in the original implementation
token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
sop_label_list = [example["sentence_order_label"] for example in examples]
sentence_order_label = torch.stack(sop_label_list)
return {
"input_ids": input_ids,
"labels": labels,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
"sentence_order_label": sentence_order_label,
}
def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
original. N-gram not applied yet.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)
probability_matrix = torch.full(labels.shape, self.mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
# the probability is `1` for masked positions; however, in the ALBERT model's attention mask `0` means masked, so invert the value
attention_mask = (~masked_indices).float()
if self.tokenizer._pad_token is not None:
attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
attention_mask.masked_fill_(attention_padding_mask, value=1.0)
labels[~masked_indices] = -100 # We only compute loss on masked tokens, -100 is default for CE compute
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels, attention_mask
@dataclass
class DataCollatorForPermutationLanguageModeling:
"""
Data collator used for permutation language modeling.
- collates batches of tensors, honoring their tokenizer's pad_token
- preprocesses batches for permutation language modeling with procedures specific to XLNet
"""
tokenizer: PreTrainedTokenizerBase
plm_probability: float = 1 / 6
max_span_length: int = 5 # maximum length of a span of masked tokens
def __call__(
self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]]
) -> Dict[str, torch.Tensor]:
if isinstance(examples[0], (dict, BatchEncoding)):
examples = [e["input_ids"] for e in examples]
batch = _collate_batch(examples, self.tokenizer)
inputs, perm_mask, target_mapping, labels = self.mask_tokens(batch)
return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
The masked tokens to be predicted for a particular sequence are determined by the following algorithm:
0. Start from the beginning of the sequence by setting ``cur_len = 0`` (number of tokens processed so far).
1. Sample a ``span_length`` from the interval ``[1, max_span_length]`` (length of span of tokens to be
masked)
2. Reserve a context of length ``context_length = span_length / plm_probability`` to surround span to be
masked
3. Sample a starting point ``start_index`` from the interval ``[cur_len, cur_len + context_length -
span_length]`` and mask tokens ``start_index:start_index + span_length``
4. Set ``cur_len = cur_len + context_length``. If ``cur_len < max_len`` (i.e. there are tokens remaining in
the sequence to be processed), repeat from Step 1.
"""
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer."
)
if inputs.size(1) % 2 != 0:
raise ValueError(
"This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details."
)
labels = inputs.clone()
# Creating the mask and target_mapping tensors
masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
cur_len = 0
max_len = labels.size(1)
while cur_len < max_len:
# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
# Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked
context_length = int(span_length / self.plm_probability)
# Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length`
start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
masked_indices[i, start_index : start_index + span_length] = 1
# Set `cur_len = cur_len + context_length`
cur_len += context_length
# Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether,
# the i-th prediction corresponds to the i-th token.
target_mapping[i] = torch.eye(labels.size(1))
special_tokens_mask = torch.tensor(
[self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
dtype=torch.bool,
)
masked_indices.masked_fill_(special_tokens_mask, value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
masked_indices.masked_fill_(padding_mask, value=0.0)
# Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
non_func_mask = ~(padding_mask | special_tokens_mask)
inputs[masked_indices] = self.tokenizer.mask_token_id
labels[~masked_indices] = -100 # We only compute loss on masked tokens
perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)
for i in range(labels.size(0)):
# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will
# determine which tokens a given token can attend to (encoded in `perm_mask`).
# Note: Length of token sequence being permuted has to be less than or equal to reused sequence length
# (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation,
# we assume that reused length is half of sequence length and permutation length is equal to reused length.
# This requires that the sequence length be even.
# Create a linear factorisation order
perm_index = torch.arange(labels.size(1))
# Split this into two halves, assuming that half the sequence is reused each time
perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
# Permute the two halves such that they do not cross over
perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
# Flatten this out into the desired permuted factorisation order
perm_index = torch.flatten(perm_index.transpose(0, 1))
# Set the permutation indices of non-masked (non-functional) tokens to the
# smallest index (-1) so that:
# (1) They can be seen by all other positions
# (2) They cannot see masked positions, so there won't be information leak
perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
# The logic for whether the i-th token can attend on the j-th token based on the factorisation order:
# 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token
# 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token
perm_mask[i] = (
perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
) & masked_indices[i]
return inputs.long(), perm_mask, target_mapping, labels.long()
| [
"torch.stack",
"torch.nn.utils.rnn.pad_sequence",
"torch.randint",
"torch.full",
"torch.tensor",
"torch.bernoulli"
] | 1.0 | arunraja-hub/transformers | 3f51e6a35871fefbdfb705902355d7530a72d1b8 |
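The 80% / 10% / 10% masking rule that several of the collators above implement can be isolated into a small stand-alone function. This sketch uses a toy vocabulary and skips special-token and padding handling, so it is an illustration of the rule rather than the `transformers` implementation:

```python
import torch


def simple_mlm_mask(input_ids, mask_token_id, vocab_size, mlm_probability=0.15):
    """Return (masked_inputs, labels) using the 80% [MASK] / 10% random / 10% keep rule."""
    labels = input_ids.clone()
    masked = torch.bernoulli(torch.full(labels.shape, mlm_probability)).bool()
    labels[~masked] = -100  # loss is only computed on masked positions

    inputs = input_ids.clone()
    replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked
    inputs[replaced] = mask_token_id  # 80% of masked positions become the mask token
    randomized = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked & ~replaced
    inputs[randomized] = torch.randint(vocab_size, labels.shape, dtype=torch.long)[randomized]
    return inputs, labels  # the remaining ~10% keep their original token


ids = torch.randint(5, 100, (2, 12))
print(simple_mlm_mask(ids, mask_token_id=103, vocab_size=100))
```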
1.10 | """
Copyright (c) 2021 Olivier Sprangers as part of Airlab Amsterdam
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
https://github.com/elephaint/pgbm/blob/main/LICENSE
"""
#%% Load packages
import torch
from pgbm import PGBM
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_california_housing
import matplotlib.pyplot as plt
#%% Objective for pgbm
def mseloss_objective(yhat, y, sample_weight=None):
gradient = (yhat - y)
hessian = torch.ones_like(yhat)
return gradient, hessian
def rmseloss_metric(yhat, y, sample_weight=None):
loss = (yhat - y).pow(2).mean().sqrt()
return loss
#%% Load data
X, y = fetch_california_housing(return_X_y=True)
#%% Parameters
params = {'min_split_gain':0,
'min_data_in_leaf':2,
'max_leaves':8,
'max_bin':64,
'learning_rate':0.1,
'n_estimators':2000,
'verbose':2,
'early_stopping_rounds':100,
'feature_fraction':1,
'bagging_fraction':1,
'seed':1,
'reg_lambda':1,
'device':'gpu',
'gpu_device_id':0,
'derivatives':'exact',
'distribution':'normal'}
n_forecasts = 1000
n_splits = 2
base_estimators = 2000
#%% Validation loop
rmse, crps = torch.zeros(n_splits), torch.zeros(n_splits)
for i in range(n_splits):
print(f'Fold {i+1}/{n_splits}')
# Split for model validation
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=i)
X_train_val, X_val, y_train_val, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=i)
# Build datasets
train_data = (X_train, y_train)
train_val_data = (X_train_val, y_train_val)
valid_data = (X_val, y_val)
# Train to retrieve best iteration
print('PGBM Validating on partial dataset...')
params['n_estimators'] = base_estimators
model = PGBM()
model.train(train_val_data, objective=mseloss_objective, metric=rmseloss_metric, valid_set=valid_data, params=params)
# Set iterations to best iteration
params['n_estimators'] = model.best_iteration
# Retrain on full set
print('PGBM Training on full dataset...')
model = PGBM()
model.train(train_data, objective=mseloss_objective, metric=rmseloss_metric, params=params)
#% Predictions
print('PGBM Prediction...')
yhat_point = model.predict(X_test)
yhat_dist = model.predict_dist(X_test, n_forecasts=n_forecasts)
# Scoring
rmse[i] = model.metric(yhat_point.cpu(), y_test)
crps[i] = model.crps_ensemble(yhat_dist.cpu(), y_test).mean()
# Print scores current fold
print(f'RMSE Fold {i+1}, {rmse[i]:.2f}')
print(f'CRPS Fold {i+1}, {crps[i]:.2f}')
# Print final scores
print(f'RMSE {rmse.mean():.2f}+-{rmse.std():.2f}')
print(f'CRPS {crps.mean():.2f}+-{crps.std():.2f}')
#%% Plot all samples
plt.plot(y_test, 'o', label='Actual')
plt.plot(yhat_point.cpu(), 'ko', label='Point prediction PGBM')
plt.plot(yhat_dist.cpu().max(dim=0).values, 'k--', label='Max bound PGBM')
plt.plot(yhat_dist.cpu().min(dim=0).values, 'k--', label='Min bound PGBM')
plt.legend() | [
"torch.zeros",
"torch.ones_like"
] | 1.10.0 | ii-research-yu/pgbm | d050a5f71f1a458d8269c4f5201744c0d7c4d487 |
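For reference, the CRPS reported above can be approximated from an ensemble of forecasts with the standard energy-form estimator CRPS ≈ E|X - y| - 0.5 * E|X - X'|. The compact torch sketch below illustrates that estimator and is not PGBM's `crps_ensemble`:

```python
import torch


def crps_ensemble_naive(forecasts, y):
    """forecasts: (n_forecasts, n_obs) samples; y: (n_obs,) observations. Returns per-observation CRPS."""
    term1 = (forecasts - y.unsqueeze(0)).abs().mean(dim=0)              # E|X - y|
    pairwise = (forecasts.unsqueeze(0) - forecasts.unsqueeze(1)).abs()  # |X - X'| for all sample pairs
    term2 = 0.5 * pairwise.mean(dim=(0, 1))                             # 0.5 * E|X - X'|
    return term1 - term2


samples = torch.randn(100, 5) + 2.0
obs = torch.full((5,), 2.0)
print(crps_ensemble_naive(samples, obs).mean())
```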
1.4 | #!/usr/bin/env python3
""" ImageNet Validation Script
This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained
models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import os
import csv
import glob
import time
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
from collections import OrderedDict
from contextlib import suppress
from timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models
from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet
from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_legacy
from ptflops import get_model_complexity_info
import pdb
has_apex = False
try:
from apex import amp
has_apex = True
except ImportError:
pass
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('validate')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--model', '-m', metavar='NAME', default='dpn92',
help='model architecture (default: dpn92)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true',
help='disable test time pool')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--amp', action='store_true', default=False,
help='Use AMP mixed precision. Defaults to Apex, fallback to native Torch AMP.')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
help='Use Tensorflow preprocessing pipeline (requires CPU TF installed)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--legacy-jit', dest='legacy_jit', action='store_true',
help='use legacy jit mode for pytorch 1.5/1.5.1/1.6 to get back fusion performance')
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',
help='Real labels JSON file for imagenet evaluation')
parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',
help='Valid label indices txt file for validation of partial label space')
parser.add_argument('--params', action='store_true', default=False,
help='only calculate params')
def validate(args):
# might as well try to validate something
args.pretrained = args.pretrained or not args.checkpoint
args.prefetcher = not args.no_prefetcher
amp_autocast = suppress # do nothing
if args.amp:
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
else:
_logger.warning("Neither APEX or Native Torch AMP is available.")
assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set."
if args.native_amp:
amp_autocast = torch.cuda.amp.autocast
_logger.info('Validating in mixed precision with native PyTorch AMP.')
elif args.apex_amp:
_logger.info('Validating in mixed precision with NVIDIA APEX AMP.')
else:
_logger.info('Validating in float32. AMP not enabled.')
if args.legacy_jit:
set_jit_legacy()
# create model
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
in_chans=3,
global_pool=args.gp,
scriptable=args.torchscript)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
if args.checkpoint:
load_checkpoint(model, args.checkpoint, args.use_ema)
param_count = sum([m.numel() for m in model.parameters()])
_logger.info('Model %s created, param count: %d' % (args.model, param_count))
data_config = resolve_data_config(vars(args), model=model, use_test_size=True, verbose=True)
test_time_pool = False
if not args.no_test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config, use_test_size=True)
if args.torchscript:
torch.jit.optimized_execution(True)
model = torch.jit.script(model)
model = model.cuda()
if args.apex_amp:
model = amp.initialize(model, opt_level='O1')
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
criterion = nn.CrossEntropyLoss().cuda()
dataset = create_dataset(
root=args.data, name=args.dataset, split=args.split,
load_bytes=args.tf_preprocessing, class_map=args.class_map)
if args.valid_labels:
with open(args.valid_labels, 'r') as f:
valid_labels = {int(line.rstrip()) for line in f}
valid_labels = [i in valid_labels for i in range(args.num_classes)]
else:
valid_labels = None
if args.real_labels:
real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
else:
real_labels = None
crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
#pdb.set_trace()
loader = create_loader(
dataset,
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=crop_pct,
pin_memory=args.pin_mem,
tf_preprocessing=args.tf_preprocessing)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
with torch.no_grad():
# warmup, reduce variability of first batch time, especially for comparing torchscript vs non
input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
model(input)
end = time.time()
macs, params = get_model_complexity_info(model, data_config['input_size'], as_strings=False, print_per_layer_stat=True, verbose=True)
if args.params:
_logger.info('Params ({:}) Macs ({:})'.format(params, macs))
return
for batch_idx, (input, target) in enumerate(loader):
if args.no_prefetcher:
target = target.cuda()
input = input.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
# compute output
with amp_autocast():
output = model(input)
if valid_labels is not None:
output = output[:, valid_labels]
loss = criterion(output, target)
if real_labels is not None:
real_labels.add_result(output)
# measure accuracy and record loss
acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1.item(), input.size(0))
top5.update(acc5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info(
'Test: [{0:>4d}/{1}] '
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
batch_idx, len(loader), batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
loss=losses, top1=top1, top5=top5))
#macs, params = get_model_complexity_info(model, (3,224,224), as_strings=False, print_per_layer_stat=True, verbose=True)
if real_labels is not None:
# real labels mode replaces topk values at the end
top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)
else:
top1a, top5a = top1.avg, top5.avg
results = OrderedDict(
top1=round(top1a, 4), top1_err=round(100 - top1a, 4),
top5=round(top5a, 4), top5_err=round(100 - top5a, 4),
param_count=round(param_count / 1e6, 2),
img_size=data_config['input_size'][-1],
        crop_pct=crop_pct,
interpolation=data_config['interpolation'])
#pdb.set_trace()
_logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f}) Params ({:}) Macs ({:})'.format(results['top1'], results['top1_err'], results['top5'], results['top5_err'], params, macs))
return results
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if os.path.isdir(args.checkpoint):
# validate all checkpoints in a path with same model
checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
checkpoints += glob.glob(args.checkpoint + '/*.pth')
model_names = list_models(args.model)
model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
else:
if args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(pretrained=True, exclude_filters=['*_in21k', '*_in22k'])
model_cfgs = [(n, '') for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model)
model_cfgs = [(n, '') for n in model_names]
if len(model_cfgs):
results_file = args.results_file or './results-all.csv'
_logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
start_batch_size = args.batch_size
for m, c in model_cfgs:
batch_size = start_batch_size
args.model = m
args.checkpoint = c
result = OrderedDict(model=args.model)
r = {}
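                # retry validation, halving the batch size on runtime errors, down to one sample per GPU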
while not r and batch_size >= args.num_gpu:
torch.cuda.empty_cache()
try:
args.batch_size = batch_size
print('Validating with batch size: %d' % args.batch_size)
r = validate(args)
except RuntimeError as e:
if batch_size <= args.num_gpu:
print("Validation failed with no ability to reduce batch size. Exiting.")
raise e
batch_size = max(batch_size // 2, args.num_gpu)
print("Validation failed, reducing batch size by 50%")
result.update(r)
if args.checkpoint:
result['checkpoint'] = args.checkpoint
results.append(result)
        except KeyboardInterrupt:
pass
results = sorted(results, key=lambda x: x['top1'], reverse=True)
if len(results):
write_results(results_file, results)
else:
validate(args)
def write_results(results_file, results):
with open(results_file, mode='w') as cf:
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| [
"torch.no_grad",
"torch.cuda.empty_cache",
"torch.jit.script",
"torch.nn.CrossEntropyLoss",
"torch.jit.optimized_execution"
] | 1.4.0 | chrisjuniorli/pytorch-image-models | bb815fa90c46b1f5f2f59a0dcddab8ce69f91dcf |
1.4 | import os
import shutil
import time
import configargparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from tensorboardX import SummaryWriter
from torch.optim.lr_scheduler import MultiStepLR
from tqdm import tqdm
import disp_models
import logger
import models
import utils_func
from dataloader import KITTILoader3D
from dataloader import KITTILoader_dataset3d
from dataloader import SceneFlowLoader
from dataloader import listflowfile
parser = configargparse.ArgParser(description='PSMNet')
parser.add('-c', '--config', required=True,
is_config_file=True, help='config file')
parser.add_argument('--save_path', type=str, default='',
                    help='path to save the log, tensorboard and checkpoint')
# network
parser.add_argument('--data_type', default='depth', choices=['disparity', 'depth'],
help='the network can predict either disparity or depth')
parser.add_argument('--arch', default='SDNet', choices=['SDNet', 'PSMNet'],
help='Model Name, default: SDNet.')
parser.add_argument('--maxdisp', type=int, default=192,
                    help='maximum disparity, the range of the disparity cost volume: [0, maxdisp-1]')
parser.add_argument('--down', type=float, default=2,
help='reduce x times resolution when build the depth cost volume')
parser.add_argument('--maxdepth', type=int, default=80,
help='the range of the depth cost volume: [1, maxdepth]')
# dataset
parser.add_argument('--kitti2015', action='store_true',
help='If false, use 3d kitti dataset. If true, use kitti stereo 2015, default: False')
parser.add_argument('--dataset', default='kitti', choices=['sceneflow', 'kitti'],
help='train with sceneflow or kitti')
parser.add_argument('--datapath', default='',
help='root folder of the dataset')
parser.add_argument('--split_train', default='Kitti/object/train.txt',
help='data splitting file for training')
parser.add_argument('--split_val', default='Kitti/object/subval.txt',
help='data splitting file for validation')
parser.add_argument('--epochs', type=int, default=300,
help='number of training epochs')
parser.add_argument('--btrain', type=int, default=3,
help='training batch size')
parser.add_argument('--bval', type=int, default=1,
help='validation batch size')
parser.add_argument('--workers', type=int, default=8,
help='number of dataset workers')
# learning rate
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate')
parser.add_argument('--lr_stepsize', nargs='+', type=int, default=[200],
help='drop lr in each step')
parser.add_argument('--lr_gamma', default=0.1, type=float,
help='gamma of the learning rate scheduler')
# resume
parser.add_argument('--resume', default=None,
help='path to a checkpoint')
parser.add_argument('--pretrain', default=None,
help='path to pretrained model')
parser.add_argument('--start_epoch', type=int, default=0,
help='start epoch')
# evaluate
parser.add_argument('--evaluate', action='store_true',
help='do evaluation')
parser.add_argument('--calib_value', type=float, default=1017,
                    help='manually set the focal length (SceneFlow does not provide calibration)')
parser.add_argument('--dynamic_bs', action='store_true',
help='If true, dynamically calculate baseline from calibration file. If false, use 0.54')
parser.add_argument('--eval_interval', type=int, default=50,
help='evaluate model every n epochs')
parser.add_argument('--checkpoint_interval', type=int, default=5,
                    help='save checkpoint every n epochs')
parser.add_argument('--generate_depth_map', action='store_true',
                    help='if true, generate depth maps and save them in save_path/depth_maps/{data_tag}/')
parser.add_argument('--data_list', default=None,
help='generate depth maps for all the data in this list')
parser.add_argument('--data_tag', default=None,
help='the suffix of the depth maps folder')
args = parser.parse_args()
best_RMSE = 1e10
def main():
global best_RMSE
# set logger
log = logger.setup_logger(os.path.join(args.save_path, 'training.log'))
for key, value in sorted(vars(args).items()):
log.info(str(key) + ': ' + str(value))
# set tensorboard
writer = SummaryWriter(args.save_path + '/tensorboardx')
# Data Loader
if args.generate_depth_map:
TrainImgLoader = None
import dataloader.KITTI_submission_loader as KITTI_submission_loader
TestImgLoader = torch.utils.data.DataLoader(
KITTI_submission_loader.SubmiteDataset(args.datapath, args.data_list, args.dynamic_bs),
batch_size=args.bval, shuffle=False, num_workers=args.workers, drop_last=False)
elif args.dataset == 'kitti':
train_data, val_data = KITTILoader3D.dataloader(args.datapath, args.split_train, args.split_val,
kitti2015=args.kitti2015)
TrainImgLoader = torch.utils.data.DataLoader(
KITTILoader_dataset3d.myImageFloder(train_data, True, kitti2015=args.kitti2015, dynamic_bs=args.dynamic_bs),
batch_size=args.btrain, shuffle=True, num_workers=args.workers, drop_last=False, pin_memory=True)
TestImgLoader = torch.utils.data.DataLoader(
KITTILoader_dataset3d.myImageFloder(val_data, False, kitti2015=args.kitti2015, dynamic_bs=args.dynamic_bs),
batch_size=args.bval, shuffle=False, num_workers=args.workers, drop_last=False, pin_memory=True)
else:
train_data, val_data = listflowfile.dataloader(args.datapath)
TrainImgLoader = torch.utils.data.DataLoader(
SceneFlowLoader.myImageFloder(train_data, True, calib=args.calib_value),
batch_size=args.btrain, shuffle=True, num_workers=args.workers, drop_last=False)
TestImgLoader = torch.utils.data.DataLoader(
SceneFlowLoader.myImageFloder(val_data, False, calib=args.calib_value),
batch_size=args.bval, shuffle=False, num_workers=args.workers, drop_last=False)
# Load Model
if args.data_type == 'disparity':
model = disp_models.__dict__[args.arch](maxdisp=args.maxdisp)
elif args.data_type == 'depth':
model = models.__dict__[args.arch](maxdepth=args.maxdepth, maxdisp=args.maxdisp, down=args.down)
else:
log.info('Model is not implemented')
assert False
# Number of parameters
log.info('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
model = nn.DataParallel(model).cuda()
torch.backends.cudnn.benchmark = True
# Optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
scheduler = MultiStepLR(optimizer, milestones=args.lr_stepsize, gamma=args.lr_gamma)
if args.pretrain:
if os.path.isfile(args.pretrain):
log.info("=> loading pretrain '{}'".format(args.pretrain))
checkpoint = torch.load(args.pretrain)
model.load_state_dict(checkpoint['state_dict'])
else:
log.info('[Attention]: Can not find checkpoint {}'.format(args.pretrain))
if args.resume:
if os.path.isfile(args.resume):
log.info("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'])
args.start_epoch = checkpoint['epoch']
optimizer.load_state_dict(checkpoint['optimizer'])
best_RMSE = checkpoint['best_RMSE']
scheduler.load_state_dict(checkpoint['scheduler'])
log.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
log.info('[Attention]: Can not find checkpoint {}'.format(args.resume))
if args.generate_depth_map:
os.makedirs(args.save_path + '/depth_maps/' + args.data_tag, exist_ok=True)
tqdm_eval_loader = tqdm(TestImgLoader, total=len(TestImgLoader))
for batch_idx, (imgL_crop, imgR_crop, calib, H, W, filename) in enumerate(tqdm_eval_loader):
pred_disp = inference(imgL_crop, imgR_crop, calib, model)
for idx, name in enumerate(filename):
np.save(args.save_path + '/depth_maps/' + args.data_tag + '/' + name, pred_disp[idx][-H[idx]:, :W[idx]])
import sys
sys.exit()
# evaluation
if args.evaluate:
evaluate_metric = utils_func.Metric()
        ## evaluation ##
for batch_idx, (imgL_crop, imgR_crop, disp_crop_L, calib) in enumerate(TestImgLoader):
start_time = time.time()
test(imgL_crop, imgR_crop, disp_crop_L, calib, evaluate_metric, model)
log.info(evaluate_metric.print(batch_idx, 'EVALUATE') + ' Time:{:.3f}'.format(time.time() - start_time))
import sys
sys.exit()
for epoch in range(args.start_epoch, args.epochs):
scheduler.step()
## training ##
train_metric = utils_func.Metric()
tqdm_train_loader = tqdm(TrainImgLoader, total=len(TrainImgLoader))
for batch_idx, (imgL_crop, imgR_crop, disp_crop_L, calib) in enumerate(tqdm_train_loader):
# start_time = time.time()
train(imgL_crop, imgR_crop, disp_crop_L, calib, train_metric, optimizer, model)
# log.info(train_metric.print(batch_idx, 'TRAIN') + ' Time:{:.3f}'.format(time.time() - start_time))
log.info(train_metric.print(0, 'TRAIN Epoch' + str(epoch)))
train_metric.tensorboard(writer, epoch, token='TRAIN')
# lw.update(train_metric.get_info(), epoch, 'Train')
## testing ##
is_best = False
if (epoch % args.eval_interval) == 0:
test_metric = utils_func.Metric()
tqdm_test_loader = tqdm(TestImgLoader, total=len(TestImgLoader))
for batch_idx, (imgL_crop, imgR_crop, disp_crop_L, calib) in enumerate(tqdm_test_loader):
# start_time = time.time()
test(imgL_crop, imgR_crop, disp_crop_L, calib, test_metric, model)
# log.info(test_metric.print(batch_idx, 'TEST') + ' Time:{:.3f}'.format(time.time() - start_time))
log.info(test_metric.print(0, 'TEST Epoch' + str(epoch)))
test_metric.tensorboard(writer, epoch, token='TEST')
# SAVE
is_best = test_metric.RMSELIs.avg < best_RMSE
best_RMSE = min(test_metric.RMSELIs.avg, best_RMSE)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_RMSE': best_RMSE,
'scheduler': scheduler.state_dict(),
'optimizer': optimizer.state_dict(),
}, is_best, epoch, folder=args.save_path)
# lw.done()
def save_checkpoint(state, is_best, epoch, filename='checkpoint.pth.tar', folder='result/default'):
torch.save(state, folder + '/' + filename)
if is_best:
shutil.copyfile(folder + '/' + filename, folder + '/model_best.pth.tar')
if args.checkpoint_interval > 0 and (epoch + 1) % args.checkpoint_interval == 0:
shutil.copyfile(folder + '/' + filename, folder + '/checkpoint_{}.pth.tar'.format(epoch + 1))
def train(imgL, imgR, depth, calib, metric_log, optimizer, model):
model.train()
calib = calib.float()
imgL, imgR, depth, calib = imgL.cuda(), imgR.cuda(), depth.cuda(), calib.cuda()
# ---------
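    # supervise only pixels whose ground-truth depth falls in the valid [1, 80] range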
mask = (depth >= 1) * (depth <= 80)
mask.detach_()
# ----
optimizer.zero_grad()
output1, output2, output3 = model(imgL, imgR, calib)
output1 = torch.squeeze(output1, 1)
output2 = torch.squeeze(output2, 1)
output3 = torch.squeeze(output3, 1)
if args.data_type == 'disparity':
output1 = disp2depth(output1, calib)
output2 = disp2depth(output2, calib)
output3 = disp2depth(output3, calib)
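    # deep supervision: weighted sum of smooth L1 losses over the three network outputs (0.5, 0.7, 1.0)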
loss = 0.5 * F.smooth_l1_loss(output1[mask], depth[mask], size_average=True) + 0.7 * F.smooth_l1_loss(
output2[mask], depth[mask], size_average=True) + F.smooth_l1_loss(output3[mask], depth[mask],
size_average=True)
metric_log.calculate(depth, output3, loss=loss.item())
loss.backward()
optimizer.step()
def inference(imgL, imgR, calib, model):
model.eval()
imgL, imgR, calib = imgL.cuda(), imgR.cuda(), calib.float().cuda()
with torch.no_grad():
output = model(imgL, imgR, calib)
if args.data_type == 'disparity':
output = disp2depth(output, calib)
pred_disp = output.data.cpu().numpy()
return pred_disp
def test(imgL, imgR, depth, calib, metric_log, model):
model.eval()
calib = calib.float()
imgL, imgR, calib, depth = imgL.cuda(), imgR.cuda(), calib.cuda(), depth.cuda()
mask = (depth >= 1) * (depth <= 80)
mask.detach_()
with torch.no_grad():
output3 = model(imgL, imgR, calib)
output3 = torch.squeeze(output3, 1)
if args.data_type == 'disparity':
output3 = disp2depth(output3, calib)
loss = F.smooth_l1_loss(output3[mask], depth[mask], size_average=True)
metric_log.calculate(depth, output3, loss=loss.item())
torch.cuda.empty_cache()
return
def disp2depth(disp, calib):
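    # depth = calib / disparity; clamp(min=1e-8) guards against division by zero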
depth = calib[:, None, None] / disp.clamp(min=1e-8)
return depth
if __name__ == '__main__':
main()
| [
"torch.nn.functional.smooth_l1_loss",
"torch.save",
"torch.no_grad",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.squeeze",
"torch.cuda.empty_cache",
"torch.load",
"torch.nn.DataParallel"
] | 1.4.0 | wodxyj/plpp | cd74916536cf180a37b088ec61ea2a12a63719f2 |
1.1 | from __future__ import absolute_import
from torch import nn
from torch.nn import functional as F
from torch.nn import init
import torchvision
import torch
import pdb
from .layers import (
SpatialAttention2d,
WeightedSum2d)
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
class ResNet(nn.Module):
__factory = {
18: torchvision.models.resnet18,
34: torchvision.models.resnet34,
50: torchvision.models.resnet50,
101: torchvision.models.resnet101,
152: torchvision.models.resnet152,
}
def __init__(self, depth, pretrained=True, cut_at_pooling=False, is_select=False,
num_features=0, norm=False, dropout=0, num_classes=0):
super(ResNet, self).__init__()
self.pretrained = pretrained
self.depth = depth
self.cut_at_pooling = cut_at_pooling
self.is_select = is_select
# Construct base (pretrained) resnet
if depth not in ResNet.__factory:
raise KeyError("Unsupported depth:", depth)
resnet = ResNet.__factory[depth](pretrained=pretrained)
resnet.layer4[0].conv2.stride = (1,1)
resnet.layer4[0].downsample[0].stride = (1,1)
self.base = nn.Sequential(
resnet.conv1, resnet.bn1, resnet.maxpool, # no relu
resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4)
self.gap = nn.AdaptiveAvgPool2d(1)
if not self.cut_at_pooling:
self.num_features = num_features
self.norm = norm
self.dropout = dropout
self.has_embedding = num_features > 0 # false
self.num_classes = num_classes
out_planes = resnet.fc.in_features
# Append new layers
if self.has_embedding: # false
self.feat = nn.Linear(out_planes, self.num_features)
self.feat_bn = nn.BatchNorm1d(self.num_features)
init.kaiming_normal_(self.feat.weight, mode='fan_out')
init.constant_(self.feat.bias, 0)
            else:  # this branch is taken
                # Change the num_features to CNN output channels
                self.num_features = out_planes  # out_planes = 2048; num_features is reset to 2048
self.num_features_delg = 512
self.feat_bn = nn.BatchNorm1d(self.num_features_delg)
self.feat_bn.bias.requires_grad_(False)
if self.dropout > 0:
self.drop = nn.Dropout(self.dropout)
if self.num_classes > 0:
self.classifier = nn.Linear(self.num_features_delg, self.num_classes, bias=False)
init.normal_(self.classifier.weight, std=0.001)
## wangzy add attention
self.attention = SpatialAttention2d(in_c=self.num_features, act_fn='relu')
self.weightSum = WeightedSum2d()
init.constant_(self.feat_bn.weight, 1)
init.constant_(self.feat_bn.bias, 0)
if not pretrained:
self.reset_params()
def forward(self, x, feature_withbn=False):
        x = self.base(x)  # b x c x H x w, C = 2048, i.e. 32 x 2048 x 16 x 8
# 1*1 conv 512
original_fea = x
# x = self.gap(x)
# x = x.view(x.size(0), -1)
        # wangzy: add attention
        x, att_score = self.attention(x)  # att_score: 32 x 1 x 16 x 8; e.g. take the top 64
# x torch.Size([32, 512, 16, 8]) att_score torch.Size([32, 1, 16, 8])
# print(att_score)
        # x = self.weightSum([x, att_score])  # multiply back by the att_score weights
x = self.gap(x) # 32*512*1*1
# print('------------------------------------------------------------')
# print(x)
x = x.view(-1, x.size()[1]) # 32 512
features = x
# print("features:",features.shape)
# pdb.set_trace()
if self.cut_at_pooling: # False
return features
if self.has_embedding: # false
bn_x = self.feat_bn(self.feat(features))
        else:  # this branch is taken
bn_x = self.feat_bn(features)
        # print("training:", self.training)  ### not sure!
        if self.training is False:  ## depends on the stage: should be True during pretraining, definitely False during target fine-tuning
prob = self.classifier(bn_x)
bn_x = F.normalize(bn_x)
            return bn_x, prob, original_fea, att_score  ### !!!! return from here during fine-tuning
            # return bn_x, self.feat_bn(original_fea), att_score  ### !!!! return from here during fine-tuning
if self.norm: # False
bn_x = F.normalize(bn_x)
elif self.has_embedding:
bn_x = F.relu(bn_x)
if self.dropout > 0: # False
bn_x = self.drop(bn_x)
if self.num_classes > 0: # True
prob = self.classifier(bn_x)
else:
return x, bn_x
if feature_withbn: # False
return bn_x, prob
return features, prob, original_fea, att_score
    # att_score:    (16, 1, 16, 8)
    # original_fea: (16, 2048, 16, 8)
    # prob:         (16, 12936)
    # features:     (16, 2048)
def reset_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
resnet = ResNet.__factory[self.depth](pretrained=self.pretrained)
self.base[0].load_state_dict(resnet.conv1.state_dict())
self.base[1].load_state_dict(resnet.bn1.state_dict())
self.base[2].load_state_dict(resnet.maxpool.state_dict())
self.base[3].load_state_dict(resnet.layer1.state_dict())
self.base[4].load_state_dict(resnet.layer2.state_dict())
self.base[5].load_state_dict(resnet.layer3.state_dict())
self.base[6].load_state_dict(resnet.layer4.state_dict())
def resnet18(**kwargs):
return ResNet(18, **kwargs)
def resnet34(**kwargs):
return ResNet(34, **kwargs)
def resnet50(**kwargs):
return ResNet(50, **kwargs)
def resnet101(**kwargs):
return ResNet(101, **kwargs)
def resnet152(**kwargs):
return ResNet(152, **kwargs)
| [
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.nn.Dropout",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.init.kaiming_normal_",
"torch.nn.functional.relu",
"torch.nn.init.normal_",
"torch.nn.BatchNorm1d",
"torch.nn.AdaptiveAvgPool2d"
] | 1.1.0 | wangyuan249/Mymmt767 | 6b9bb566d290bd3157350f6496fcb5df8c2b515c |
1.3 | import numpy as np
import torch
from torch.optim import Adam
import gym
import time
import spinup.algos.pytorch.ppo.core as core
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_pytorch import setup_pytorch_for_mpi, sync_params, mpi_avg_grads
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs
from rlschool import make_env
class PPOBuffer:
"""
A buffer for storing trajectories experienced by a PPO agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
"""
def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
self.adv_buf = np.zeros(size, dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.ret_buf = np.zeros(size, dtype=np.float32) # reward to go
self.val_buf = np.zeros(size, dtype=np.float32)
self.logp_buf = np.zeros(size, dtype=np.float32)
self.gamma, self.lam = gamma, lam
self.ptr, self.path_start_idx, self.max_size = 0, 0, size
def store(self, obs, act, rew, val, logp):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
assert self.ptr < self.max_size # buffer has to have room so you can store
self.obs_buf[self.ptr] = obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.val_buf[self.ptr] = val
self.logp_buf[self.ptr] = logp
self.ptr += 1
def finish_path(self, last_val=0):
"""
Call this at the end of a trajectory, or when one gets cut off
by an epoch ending. This looks back in the buffer to where the
trajectory started, and uses rewards and value estimates from
the whole trajectory to compute advantage estimates with GAE-Lambda,
as well as compute the rewards-to-go for each state, to use as
the targets for the value function.
The "last_val" argument should be 0 if the trajectory ended
because the agent reached a terminal state (died), and otherwise
should be V(s_T), the value function estimated for the last state.
This allows us to bootstrap the reward-to-go calculation to account
for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
"""
path_slice = slice(self.path_start_idx, self.ptr)
rews = np.append(self.rew_buf[path_slice], last_val)
vals = np.append(self.val_buf[path_slice], last_val)
# the next two lines implement GAE-Lambda advantage calculation
deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
self.adv_buf[path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam)
# the next line computes rewards-to-go, to be targets for the value function
self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]
self.path_start_idx = self.ptr
def get(self):
"""
Call this at the end of an epoch to get all of the data from
the buffer, with advantages appropriately normalized (shifted to have
mean zero and std one). Also, resets some pointers in the buffer.
"""
assert self.ptr == self.max_size # buffer has to be full before you can get
self.ptr, self.path_start_idx = 0, 0
# the next two lines implement the advantage normalization trick
adv_mean, adv_std = mpi_statistics_scalar(self.adv_buf)
self.adv_buf = (self.adv_buf - adv_mean) / adv_std
data = dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf,
adv=self.adv_buf, logp=self.logp_buf)
return {k: torch.as_tensor(v, dtype=torch.float32) for k, v in data.items()}
def ppo(env_fn, actor_critic=core.MLPActorCritic, ac_kwargs=dict(), seed=0,
steps_per_epoch=4000, epochs=50, gamma=0.99, clip_ratio=0.2, pi_lr=3e-4,
vf_lr=1e-3, train_pi_iters=80, train_v_iters=80, lam=0.97, max_ep_len=1000,
target_kl=0.01, logger_kwargs=dict(), save_freq=10):
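    # PPO-Clip with GAE-Lambda advantage estimation (applied here to the rlschool Quadrotor hovering task)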
# Special function to avoid certain slowdowns from PyTorch + MPI combo.
setup_pytorch_for_mpi()
# Set up logger and save configuration
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
# Random seed
seed += 10000 * proc_id()
torch.manual_seed(seed)
np.random.seed(seed)
# Instantiate environment
# env = env_fn()
# obs_dim = env.observation_space.shape
# act_dim = env.action_space.shape
env = make_env("Quadrotor", task="hovering_control")
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Create actor-critic module
ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)
# Sync params across processes
sync_params(ac)
# Count variables
var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.v])
logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n' % var_counts)
# Set up experience buffer
local_steps_per_epoch = int(steps_per_epoch / num_procs())
buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)
# Set up function for computing PPO policy loss
def compute_loss_pi(data):
obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']
# Policy loss
pi, logp = ac.pi(obs, act)
ratio = torch.exp(logp - logp_old)
clip_adv = torch.clamp(ratio, 1 - clip_ratio, 1 + clip_ratio) * adv
loss_pi = -(torch.min(ratio * adv, clip_adv)).mean()
# Useful extra info
approx_kl = (logp_old - logp).mean().item()
ent = pi.entropy().mean().item()
clipped = ratio.gt(1 + clip_ratio) | ratio.lt(1 - clip_ratio)
clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()
pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)
return loss_pi, pi_info
# Set up function for computing value loss
def compute_loss_v(data):
obs, ret = data['obs'], data['ret']
return ((ac.v(obs) - ret) ** 2).mean()
# Set up optimizers for policy and value function
pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)
vf_optimizer = Adam(ac.v.parameters(), lr=vf_lr)
# Set up model saving
logger.setup_pytorch_saver(ac)
def update():
data = buf.get()
pi_l_old, pi_info_old = compute_loss_pi(data)
pi_l_old = pi_l_old.item()
v_l_old = compute_loss_v(data).item()
# Train policy with multiple steps of gradient descent
for i in range(train_pi_iters):
pi_optimizer.zero_grad()
loss_pi, pi_info = compute_loss_pi(data)
kl = mpi_avg(pi_info['kl'])
if kl > 1.5 * target_kl:
logger.log('Early stopping at step %d due to reaching max kl.' % i)
break
loss_pi.backward()
mpi_avg_grads(ac.pi) # average grads across MPI processes
pi_optimizer.step()
logger.store(StopIter=i)
# Value function learning
for i in range(train_v_iters):
vf_optimizer.zero_grad()
loss_v = compute_loss_v(data)
loss_v.backward()
mpi_avg_grads(ac.v) # average grads across MPI processes
vf_optimizer.step()
# Log changes from update
kl, ent, cf = pi_info['kl'], pi_info_old['ent'], pi_info['cf']
logger.store(LossPi=pi_l_old, LossV=v_l_old,
KL=kl, Entropy=ent, ClipFrac=cf,
DeltaLossPi=(loss_pi.item() - pi_l_old),
DeltaLossV=(loss_v.item() - v_l_old))
# Prepare for interaction with environment
start_time = time.time()
o, ep_ret, ep_len = env.reset(), 0, 0
# Main loop: collect experience in env and update/log each epoch
for epoch in range(epochs):
for t in range(local_steps_per_epoch):
a, v, logp = ac.step(torch.as_tensor(o, dtype=torch.float32))
next_o, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# save and log
buf.store(o, a, r, v, logp)
logger.store(VVals=v)
# Update obs (critical!)
o = next_o
timeout = ep_len == max_ep_len
terminal = d or timeout
epoch_ended = t == local_steps_per_epoch - 1
if terminal or epoch_ended:
if epoch_ended and not (terminal):
print('Warning: trajectory cut off by epoch at %d steps.' % ep_len, flush=True)
# if trajectory didn't reach terminal state, bootstrap value target
if timeout or epoch_ended:
_, v, _ = ac.step(torch.as_tensor(o, dtype=torch.float32))
else:
v = 0
buf.finish_path(v)
if terminal:
# only save EpRet / EpLen if trajectory finished
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, ep_ret, ep_len = env.reset(), 0, 0
# Save model
if (epoch % save_freq == 0) or (epoch == epochs - 1):
logger.save_state({'env': env}, None)
# Perform PPO update!
update()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('TotalEnvInteracts', (epoch + 1) * steps_per_epoch)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossV', average_only=True)
logger.log_tabular('DeltaLossPi', average_only=True)
logger.log_tabular('DeltaLossV', average_only=True)
logger.log_tabular('Entropy', average_only=True)
logger.log_tabular('KL', average_only=True)
logger.log_tabular('ClipFrac', average_only=True)
logger.log_tabular('StopIter', average_only=True)
logger.log_tabular('Time', time.time() - start_time)
logger.dump_tabular()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='HalfCheetah-v2')
parser.add_argument('--hid', type=int, default=64)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--steps', type=int, default=4000)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--exp_name', type=str, default='ppo')
args = parser.parse_args()
mpi_fork(args.cpu) # run parallel code with mpi
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
ppo(lambda : make_env("Quadrotor", task="hovering_control"), actor_critic=core.MLPActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l), gamma=args.gamma,
seed=args.seed, steps_per_epoch=args.steps, epochs=args.epochs,
logger_kwargs=logger_kwargs) | [
"torch.min",
"torch.clamp",
"torch.manual_seed",
"torch.as_tensor",
"torch.exp"
] | 1.3.1 | ANCL/QuadPPO | b7ed0574467bd321f4259175621a12ff7aeb7d12 |
1.2 | #pylint: disable=invalid-name
import numpy as np
import torch
from torch import nn
from aw_nas import ops
from aw_nas.utils.exception import expect, ConfigException
from aw_nas.weights_manager.rnn_shared import RNNSharedNet, INIT_RANGE
class RNNGenotypeModel(RNNSharedNet):
REGISTRY = "final_model"
NAME = "rnn_model"
def __init__(self, search_space, device, genotypes,
num_tokens, num_emb=300, num_hid=300,
tie_weight=True, decoder_bias=True,
share_primitive_weights=False, share_from_weights=False,
batchnorm_step=False,
batchnorm_edge=False, batchnorm_out=True,
# training
max_grad_norm=5.0,
# dropout probs
dropout_emb=0., dropout_inp0=0., dropout_inp=0., dropout_hid=0., dropout_out=0.):
self.genotypes = genotypes
if isinstance(genotypes, str):
self.genotypes = eval("search_space.genotype_type({})".format(self.genotypes)) # pylint: disable=eval-used
self.genotypes = list(self.genotypes._asdict().values())
# check tos:
_tos = [conn[2] for conn in self.genotypes[0]]
        expect((np.argsort(_tos) == np.arange(len(_tos))).all(),
               "genotype must be ordered so that `to_node` monotonically increases",
               ConfigException)
super(RNNGenotypeModel, self).__init__(
search_space, device,
cell_cls=RNNGenotypeCell, op_cls=None,
num_tokens=num_tokens, num_emb=num_emb, num_hid=num_hid,
tie_weight=tie_weight, decoder_bias=decoder_bias,
share_primitive_weights=share_primitive_weights, share_from_weights=share_from_weights,
batchnorm_step=batchnorm_step,
batchnorm_edge=batchnorm_edge, batchnorm_out=batchnorm_out,
max_grad_norm=max_grad_norm,
dropout_emb=dropout_emb, dropout_inp0=dropout_inp0, dropout_inp=dropout_inp,
dropout_hid=dropout_hid, dropout_out=dropout_out,
genotypes=self.genotypes) # this genotypes will be used for construction/forward
self.logger.info("Genotype: %s", self.genotypes)
def forward(self, inputs, hiddens): #pylint: disable=arguments-differ
# this genotypes will not be used
return RNNSharedNet.forward(self, inputs, self.genotypes, hiddens)
@classmethod
def supported_rollout_types(cls):
# this should not be called
# assert 0, "should not be called"
return []
def assemble_candidate(self, *args, **kwargs): #pylint: disable=arguments-differ
# this will not be called
assert 0, "should not be called"
class RNNGenotypeCell(nn.Module):
def __init__(self, search_space, device, op_cls, num_emb, num_hid,
share_from_weights, batchnorm_step,
batchnorm_edge, batchnorm_out, genotypes, **kwargs):
super(RNNGenotypeCell, self).__init__()
self.genotypes = genotypes
self.search_space = search_space
self.num_emb = num_emb
self.num_hid = num_hid
self.batchnorm_step = batchnorm_step
self.batchnorm_edge = batchnorm_edge
self.batchnorm_out = batchnorm_out
self.share_from_w = share_from_weights
self._steps = search_space.num_steps
self._num_init = search_space.num_init_nodes
# the first step, convert input x and previous hidden
self.w_prev = nn.Linear(num_emb + num_hid, 2 * num_hid, bias=False)
self.w_prev.weight.data.uniform_(-INIT_RANGE, INIT_RANGE)
if self.batchnorm_edge:
# batchnorm on each edge/connection
            # when `num_node_inputs==1`, there are `step + 1` edges
# the first bn
self.bn_prev = nn.BatchNorm1d(num_emb + num_hid, affine=True)
# other bn
self.bn_edges = nn.ModuleList([nn.BatchNorm1d(num_emb + num_hid, affine=True)
for _ in range(len(self.genotypes[0]))])
if self.batchnorm_step:
# batchnorm after every step (as in darts's implementation)
self.bn_steps = nn.ModuleList([nn.BatchNorm1d(num_hid, affine=False)
for _ in range(self._steps+1)])
if self.batchnorm_out:
# the out bn
self.bn_out = nn.BatchNorm1d(num_hid, affine=True)
if self.share_from_w:
            # as `num_node_inputs==1`, only one `from` node is used at each step
# `share_from_w==True/False` are equivalent in final training...
self.step_weights = nn.ModuleList([
nn.Linear(num_hid, 2*num_hid, bias=False)
for _ in range(self._steps)])
[mod.weight.data.uniform_(-INIT_RANGE, INIT_RANGE) for mod in self.step_weights]
# initiatiate op on edges
self.Ws = nn.ModuleList()
self.ops = nn.ModuleList()
genotype_, _ = self.genotypes
for op_type, _, _ in genotype_:
# edge weights
op = ops.get_op(op_type)()
self.ops.append(op)
if not self.share_from_w:
W = nn.Linear(self.num_hid, 2 * self.num_hid, bias=False)
W.weight.data.uniform_(-INIT_RANGE, INIT_RANGE)
self.Ws.append(W)
def forward(self, inputs, hidden, x_mask, h_mask, genotypes): #pylint: disable=arguments-differ
"""
Cell forward, forward for one timestep.
"""
genotype, concat_ = self.genotypes # self.genotypes == genotypes
s0 = self._compute_init_state(inputs, hidden, x_mask, h_mask)
if self.batchnorm_step:
s0 = self.bn_steps[0](s0)
states = {0: s0}
for i, (_, from_, to_) in enumerate(genotype):
s_prev = states[from_]
s_inputs = s_prev
if self.training:
s_inputs = s_prev * h_mask
w = self.step_weights[to_-1] if self.share_from_w else self.Ws[i]
ch = w(s_inputs)
if self.batchnorm_edge:
ch = self.bn_edges[i](ch)
c, h = torch.split(ch, self.num_hid, dim=-1)
c = c.sigmoid()
h = self.ops[i](h)
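            # gated (highway-style) update: c interpolates between the previous state and the transformed candidate h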
out = s_prev + c * (h - s_prev)
if to_ in states:
states[to_] = states[to_] + out
else:
states[to_] = out
to_finish = i == len(genotype)-1 or genotype[i+1][2] != to_
if self.batchnorm_step and to_finish:
# if the calculation of the `to_` step finished, batch norm it
states[to_] = self.bn_steps[to_](states[to_])
# average the ends
output = torch.mean(torch.stack([states[i] for i in concat_]), 0)
if self.batchnorm_out:
# batchnorm
output = self.bn_out(output)
return output
def _compute_init_state(self, x, h, x_mask, h_mask):
if self.training:
xh_prev = torch.cat([x * x_mask, h * h_mask], dim=-1)
else:
xh_prev = torch.cat([x, h], dim=-1)
xh_prev = self.w_prev(xh_prev)
if self.batchnorm_edge:
xh_prev = self.bn_prev(xh_prev)
c0, h0 = torch.split(xh_prev, self.num_hid, dim=-1)
c0 = c0.sigmoid()
h0 = h0.tanh()
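        # blend the previous hidden state h with the candidate h0 through the gate c0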
s0 = h + c0 * (h0 - h)
return s0
| [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.ModuleList",
"torch.split",
"torch.nn.BatchNorm1d"
] | 1.2.0 | Harald-R/aw_nas | 8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783 |
1.2 | # -*- coding: utf-8 -*-
import os
import random
import functools
import six
import numpy as np
import torch
from torch import nn
from torch.utils.data.distributed import DistributedSampler
from aw_nas import utils
from aw_nas.final.base import FinalTrainer
from aw_nas.final.bnn_model import BNNGenotypeModel
from aw_nas.utils.common_utils import nullcontext
from aw_nas.utils.exception import expect
from aw_nas.utils import DataParallel
from aw_nas.utils import DistributedDataParallel
from aw_nas.utils.torch_utils import calib_bn, GroupSampler, DistributedGroupSampler
from aw_nas.utils.parallel_utils import get_dist_info
try:
from torch.nn import SyncBatchNorm
convert_sync_bn = SyncBatchNorm.convert_sync_batchnorm
except ImportError:
utils.getLogger("cnn_trainer").warn(
"Import convert_sync_bn failed! SyncBatchNorm might not work!")
convert_sync_bn = lambda m: m
def _warmup_update_lr(optimizer, epoch, init_lr, warmup_epochs, warmup_ratio=0.0):
"""
update learning rate of optimizers
"""
lr = (init_lr - warmup_ratio) * epoch / warmup_epochs + warmup_ratio
for param_group in optimizer.param_groups:
param_group["lr"] = lr
return lr
def worker_init_fn(worker_id, num_workers, rank, seed):
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
class CNNFinalTrainer(FinalTrainer): #pylint: disable=too-many-instance-attributes
NAME = "cnn_trainer"
def __init__(self, model, dataset, device, gpus, objective,#pylint: disable=dangerous-default-value
multiprocess=False,
epochs=600, batch_size=96,
optimizer_type="SGD", optimizer_kwargs=None,
learning_rate=0.025, momentum=0.9,
warmup_epochs=0,
optimizer_scheduler={
"type": "CosineAnnealingLR",
"T_max": 600,
"eta_min": 0.001
},
weight_decay=3e-4, no_bias_decay=False,
grad_clip=5.0,
auxiliary_head=False, auxiliary_weight=0.4,
add_regularization=False,
save_as_state_dict=False,
workers_per_queue=2,
eval_no_grad=True,
eval_every=1,
eval_batch_size=1,
calib_bn_setup=False, # for OFA final model
seed=None,
schedule_cfg=None):
super(CNNFinalTrainer, self).__init__(schedule_cfg)
self.model = model
self.parallel_model = None
self.dataset = dataset
self.device = device
self.gpus = gpus
self.multiprocess = multiprocess
self.objective = objective
self._perf_func = self.objective.get_perfs
self._perf_names = self.objective.perf_names()
self._obj_loss = self.objective.get_loss
self.epochs = epochs
self.warmup_epochs = warmup_epochs
self.optimizer_type = optimizer_type
self.optimizer_kwargs = optimizer_kwargs
self.learning_rate = learning_rate
self.grad_clip = grad_clip
self.auxiliary_head = auxiliary_head
self.auxiliary_weight = auxiliary_weight
self.add_regularization = add_regularization
self.save_as_state_dict = save_as_state_dict
self.eval_no_grad = eval_no_grad
self.eval_every = eval_every
self.calib_bn_setup = calib_bn_setup
# for optimizer
self.weight_decay = weight_decay
self.no_bias_decay = no_bias_decay
self.learning_rate = learning_rate
self.momentum = momentum
self.optimizer_scheduler_cfg = optimizer_scheduler
self._criterion = nn.CrossEntropyLoss().to(self.device)
_splits = self.dataset.splits()
train_kwargs = getattr(_splits["train"], "kwargs", {})
test_kwargs = getattr(_splits["test"], "kwargs", train_kwargs)
"""
        GroupSampler is needed when `keep_ratio` in the dataset is set to True.
        It splits the images into two groups: aspect ratio > 1 and aspect ratio < 1.
        `shuffle` has no effect when using GroupSampler because the sampler cannot
        preserve the original order of images.
"""
group = train_kwargs.pop("group_sample", False)
test_kwargs["shuffle"] = False
if self.multiprocess:
sampler = DistributedGroupSampler(_splits["train"], None,
batch_size) if group \
else DistributedSampler(_splits["train"], shuffle=True)
test_kwargs["sampler"] = DistributedSampler(_splits["test"],
shuffle=False)
else:
sampler = GroupSampler(_splits["train"], None, batch_size) if group \
else None
if sampler is None:
train_kwargs["shuffle"] = True
else:
train_kwargs.pop("shuffle", None)
train_kwargs["sampler"] = sampler
rank, world_size = get_dist_info()
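        # seed each DataLoader worker from the global seed, process rank, and worker id for reproducibility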
init_fn = functools.partial(worker_init_fn, num_workers=workers_per_queue, rank=rank,
seed=seed) if seed is not None else None
self.train_queue = torch.utils.data.DataLoader(
_splits["train"], batch_size=batch_size, pin_memory=False,
num_workers=workers_per_queue,
worker_init_fn=init_fn,
**train_kwargs)
self.valid_queue = torch.utils.data.DataLoader(
_splits["test"], batch_size=eval_batch_size, pin_memory=False,
num_workers=workers_per_queue, **test_kwargs)
if self.calib_bn_setup:
self.model = calib_bn(self.model, self.train_queue)
# optimizer and scheduler is called in `trainer.setup` call
self.optimizer = None
self.scheduler = None
# states of the trainer
self.last_epoch = 0
self.epoch = 0
self.save_every = None
self.report_every = None
self.train_dir = None
self._is_setup = False
def setup(self, load=None, load_state_dict=None,
save_every=None, train_dir=None, report_every=50):
expect(not (load is not None and load_state_dict is not None),
"`load` and `load_state_dict` cannot be passed simultaneously.")
if load is not None:
self.load(load)
else:
assert self.model is not None
if load_state_dict is not None:
self._load_state_dict(load_state_dict)
self.logger.info("param size = {} M".format( \
utils.count_parameters(
self.model,
count_binary=isinstance(self.model, BNNGenotypeModel))/1.e6))
if self.model is not None:
self._parallelize()
self.optimizer = self._init_optimizer()
self.scheduler = self._init_scheduler(self.optimizer, self.optimizer_scheduler_cfg)
self.save_every = save_every
self.train_dir = train_dir
self.report_every = report_every
expect(self.save_every is None or self.train_dir is not None,
"when `save_every` is not None, make sure `train_dir` is not None")
self._is_setup = True
def save(self, path):
rank = (os.environ.get("LOCAL_RANK"))
if rank is not None and rank != '0':
return
path = utils.makedir(path)
if self.save_as_state_dict:
torch.save(self.model.state_dict(), os.path.join(path, "model_state.pt"))
else:
# save the model directly instead of the state_dict,
            # so that it can be loaded and run directly, without specifying the configuration
torch.save(self.model, os.path.join(path, "model.pt"))
torch.save({
"epoch": self.epoch,
"optimizer":self.optimizer.state_dict()
}, os.path.join(path, "optimizer.pt"))
if self.scheduler is not None:
torch.save(self.scheduler.state_dict(), os.path.join(path, "scheduler.pt"))
self.logger.info("Saved checkpoint to %s", path)
def load(self, path):
# load the model
m_path = os.path.join(path, "model.pt") if os.path.isdir(path) else path
if not os.path.exists(m_path):
m_path = os.path.join(path, "model_state.pt")
self._load_state_dict(m_path)
else:
self.model = torch.load(m_path, map_location=torch.device("cpu"))
self.model.to(self.device)
self._parallelize()
log_strs = ["model from {}".format(m_path)]
# init the optimzier/scheduler
self.optimizer = self._init_optimizer()
self.scheduler = self._init_scheduler(self.optimizer, self.optimizer_scheduler_cfg)
o_path = os.path.join(path, "optimizer.pt") if os.path.isdir(path) else None
if o_path and os.path.exists(o_path):
checkpoint = torch.load(o_path, map_location=torch.device("cpu"))
self.optimizer.load_state_dict(checkpoint["optimizer"])
log_strs.append("optimizer from {}".format(o_path))
self.last_epoch = checkpoint["epoch"]
# load the optimizer/scheduler
if self.scheduler is not None:
s_path = os.path.join(path, "scheduler.pt") if os.path.isdir(path) else None
if s_path and os.path.exists(s_path):
self.scheduler.load_state_dict(torch.load(s_path, map_location=torch.device("cpu")))
log_strs.append("scheduler from {}".format(s_path))
self.logger.info("param size = %f M",
utils.count_parameters(self.model) / 1.e6)
self.logger.info("Loaded checkpoint from %s: %s", path, ", ".join(log_strs))
self.logger.info("Last epoch: %d", self.last_epoch)
def train(self):
if len(self.gpus) >= 2:
self._forward_once_for_flops(self.model)
# save the model.log
if self.train_dir is not None:
with open(os.path.join(self.train_dir, "model.log"),"w") as f:
f.write(str(self.model))
for epoch in range(self.last_epoch+1, self.epochs+1):
self.epoch = epoch
self.on_epoch_start(epoch)
if epoch < self.warmup_epochs:
_warmup_update_lr(self.optimizer, epoch, self.learning_rate, self.warmup_epochs)
else:
if self.scheduler is not None and epoch != 1:
self.scheduler.step()
self.logger.info("epoch %d lr %e", epoch, self.optimizer.param_groups[0]["lr"])
train_acc, train_obj = self.train_epoch(self.train_queue, self.parallel_model,
self._criterion, self.optimizer,
self.device, epoch)
self.logger.info("train_acc %f ; train_obj %f", train_acc, train_obj)
if self.save_every and epoch % self.save_every == 0:
path = os.path.join(self.train_dir, str(epoch))
self.save(path)
if epoch % self.eval_every == 0:
valid_acc, valid_obj, valid_perfs = self.infer_epoch(self.valid_queue,
self.parallel_model,
self._criterion, self.device)
self.logger.info("valid_acc %f ; valid_obj %f ; valid performances: %s",
valid_acc, valid_obj,
"; ".join(
["{}: {:.3f}".format(n, v) for n, v in valid_perfs.items()]))
self.on_epoch_end(epoch)
self.save(os.path.join(self.train_dir, "final"))
def evaluate_split(self, split):
if len(self.gpus) >= 2:
self._forward_once_for_flops(self.model)
assert split in {"train", "test"}
if split == "test":
queue = self.valid_queue
else:
queue = self.train_queue
acc, obj, perfs = self.infer_epoch(queue, self.parallel_model,
self._criterion, self.device)
self.logger.info("acc %f ; obj %f ; performance: %s", acc, obj,
"; ".join(
["{}: {:.3f}".format(n, v) for n, v in perfs.items()]))
return acc, obj
@classmethod
def supported_data_types(cls):
return ["image"]
def _load_state_dict(self, path):
# load state dict
checkpoint = torch.load(path, map_location=torch.device("cpu"))
extra_keys = set(checkpoint.keys()).difference(set(self.model.state_dict().keys()))
if extra_keys:
            self.logger.error("%d extra keys in checkpoint! "
                              "Make sure the genotype matches", len(extra_keys))
missing_keys = {key for key in set(self.model.state_dict().keys())\
.difference(checkpoint.keys()) \
if "auxiliary" not in key}
if missing_keys:
            self.logger.error(("{} missing keys will not be loaded! Check your genotype. "
                               "This is probably because you're using a state dict dumped by"
                               " `awnas eval-arch --save-state-dict` in an old version, "
                               "and your genotype actually skips some "
                               "cells, which might mean many parameters of your "
                               "sub-network are not actually active, "
                               "and this genotype might not be so effective.")
                              .format(len(missing_keys)))
self.logger.error(str(missing_keys))
self.logger.info(self.model.load_state_dict(checkpoint, strict=False))
def _parallelize(self):
if self.multiprocess:
self.model = convert_sync_bn(self.model).to(self.device)
self.parallel_model = DistributedDataParallel(
self.model, self.gpus, broadcast_buffers=False, find_unused_parameters=True)
elif len(self.gpus) >= 2:
self.parallel_model = DataParallel(self.model, self.gpus).to(self.device)
else:
self.parallel_model = self.model
def _init_optimizer(self):
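        # split parameters into weights and biases so bias terms can optionally skip weight decay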
group_weight = []
group_bias = []
for name, param in self.model.named_parameters():
if "bias" in name:
group_bias.append(param)
else:
group_weight.append(param)
assert len(list(self.model.parameters())) == len(group_weight) + len(group_bias)
optim_cls = getattr(torch.optim, self.optimizer_type)
if self.optimizer_type == "Adam":
optim_kwargs = {
"lr": self.learning_rate,
"weight_decay": self.weight_decay
}
else:
optim_kwargs = {
"lr": self.learning_rate,
"momentum": self.momentum,
"weight_decay": self.weight_decay
}
optim_kwargs.update(self.optimizer_kwargs or {})
optimizer = optim_cls(
[{"params": group_weight},
{"params": group_bias,
"weight_decay": 0 if self.no_bias_decay else self.weight_decay}],
**optim_kwargs)
return optimizer
@staticmethod
def _init_scheduler(optimizer, cfg):
if cfg:
cfg = {k:v for k, v in six.iteritems(cfg)}
sch_cls = utils.get_scheduler_cls(cfg.pop("type"))
return sch_cls(optimizer, **cfg)
return None
def train_epoch(self, train_queue, model, criterion, optimizer, device, epoch):
expect(self._is_setup, "trainer.setup should be called first")
objs = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
model.train()
for step, (inputs, target) in enumerate(train_queue):
inputs = inputs.to(device)
target = target.to(device)
optimizer.zero_grad()
if self.auxiliary_head: # assume model return two logits in train mode
logits, logits_aux = model(inputs)
loss = self._obj_loss(inputs, logits, target, model,
add_evaluator_regularization=self.add_regularization)
loss_aux = criterion(logits_aux, target)
loss += self.auxiliary_weight * loss_aux
else:
logits = model(inputs)
loss = self._obj_loss(inputs, logits, target, model,
add_evaluator_regularization=self.add_regularization)
#torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM)
loss.backward()
if isinstance(self.grad_clip, (int, float)) and self.grad_clip > 0:
nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = inputs.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
del loss
if step % self.report_every == 0:
self.logger.info("train %03d %.3f; %.2f%%; %.2f%%",
step, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
def infer_epoch(self, valid_queue, model, criterion, device):
expect(self._is_setup, "trainer.setup should be called first")
objs = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
objective_perfs = utils.OrderedStats()
all_perfs = []
model.eval()
context = torch.no_grad if self.eval_no_grad else nullcontext
with context():
for step, (inputs, target) in enumerate(valid_queue):
inputs = inputs.to(device)
target = target.to(device)
logits = model(inputs)
loss = criterion(logits, target)
perfs = self._perf_func(inputs, logits, target, model)
all_perfs.append(perfs)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = inputs.size(0)
# objective_perfs.update(dict(zip(self._perf_names, perfs)), n=n)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
del loss
if step % self.report_every == 0:
all_perfs_by_name = list(zip(*all_perfs))
# support use objective aggregate fn, for stat method other than mean
# e.g., adversarial distance median; detection mAP (see det_trainer.py)
obj_perfs = {
k: self.objective.aggregate_fn(k, False)(v)
for k, v in zip(self._perf_names, all_perfs_by_name)
}
self.logger.info("valid %03d %e %f %f %s", step, objs.avg, top1.avg, top5.avg,
"; ".join(["{}: {:.3f}".format(perf_n, v) \
# for perf_n, v in objective_perfs.avgs().items()]))
for perf_n, v in obj_perfs.items()]))
all_perfs_by_name = list(zip(*all_perfs))
obj_perfs = {
k: self.objective.aggregate_fn(k, False)(v)
for k, v in zip(self._perf_names, all_perfs_by_name)
}
return top1.avg, objs.avg, obj_perfs
def on_epoch_start(self, epoch):
super(CNNFinalTrainer, self).on_epoch_start(epoch)
self.model.on_epoch_start(epoch)
self.objective.on_epoch_start(epoch)
def on_epoch_end(self, epoch):
super(CNNFinalTrainer, self).on_epoch_end(epoch)
self.model.on_epoch_end(epoch)
self.objective.on_epoch_end(epoch)
def _forward_once_for_flops(self, model):
# forward the model once to get the flops calculated
self.logger.info("Training parallel: Forward one batch for the flops information")
inputs, _ = next(iter(self.train_queue))
model(inputs.to(self.device))
| [
"torch.device",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.nn.CrossEntropyLoss"
] | 1.2.0 | Harald-R/aw_nas | 8cf0cf48f7bcfd7893e6355dcc3ccbc83fd39783 |
1.8 | import ast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def get_descendants(node, ls):
for child in node.children:
ls.append(child)
get_descendants(child, ls)
return ls
class Node():
'''
    For each node we store its parent and children nodes, as well as its node type and its
    vector representation.
'''
def __init__(self, node, depth, parent = None):
self.node = node
self.children = []
self.parent = parent
self.type = self.get_node_type(node)
self.vector = []
self.combined_vector = []
self.leaves = None
self.depth = depth
self.position = None
self.siblings = None
self.y = None
self.pool = None
def get_node_type(self, node):
if node.__class__.__name__ == 'FunctionDef':
if 'wrap' in node.name:
return 'wrap'
else:
return node.__class__.__name__
else:
return node.__class__.__name__
def __str__(self):
return self.type
#Returns the children nodes of each node
def get_children(self):
ls = []
for child in ast.iter_child_nodes(self.node):
#nodeChild = Node(child, self)
ls.append(child)
return ls
def descendants(self):
'''
        Return every node in the subtree rooted at this node.
        Note: the node itself is included as a descendant. This is useful when collecting
        all of the nodes (otherwise the root 'Module' node would be missed).
'''
ls = [self]
return get_descendants(self, ls)
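    # Minimal usage sketch (an assumption, not part of the original module): wrap a parsed
    # module, attach one level of children by hand, and list every descendant's type.
    #
    #   tree = ast.parse("def wrap_fn(x):\n    return x + 1")
    #   root = Node(tree, depth=0)
    #   for child in root.get_children():
    #       root.set_children(Node(child, depth=1, parent=root))
    #   print([str(n) for n in root.descendants()])   # e.g. ['Module', 'wrap']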
# Assigns the vector embedding to each node
def set_vector(self, df):
if self.type in df.columns:
vector = df[self.type]
else:
num = len(df['Yield'])
vector = [0 for i in range(num)]
if type(vector) == torch.Tensor:
self.vector = vector
else:
self.vector = torch.tensor(vector).float().requires_grad_()
def set_combined_vector(self, vector):
self.combined_vector = vector
'''
# Assigns the number of leaves nodes under each node
def set_l(self, leaves_nodes):
self.leaves_nodes = leaves_nodes
'''
def get_leaves(self):
        # TODO: determine when this actually needs to be recomputed
leaves = []
descendants = self.descendants()
for descendant in descendants:
if descendant.children == []:
leaves.append(descendant)
return leaves
def set_leaves(self):
self.leaves = self.get_leaves()
def set_y(self, y):
self.y = y
def set_pool(self, pool):
self.pool = pool
def set_children(self, child):
self.children.append(child)
def set_matrix_and_coeffs(self, matrix, t,l,r):
self.matrix = matrix
self.coeff_t = t
self.coeff_l = l
        self.coeff_r = r
| [
"torch.tensor"
] | 1.8.1 | ADCenterNetwork/discern-fmk | 4781f1a986f7b24f298b2729b87ddee4227cb1d0 |
1.8 | import os
import torch
from torch.utils.data import Dataset, random_split, DataLoader
from PIL import Image
import torchvision.models as models
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
# from sklearn.metrics import f1_score
import torch.nn.functional as F
import torch.nn as nn
from torchvision.utils import make_grid
from torchvision.datasets import ImageFolder
class ImageClassificationBase(nn.Module):
def training_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = accuracy(out, labels) # Calculate accuracy
return {"val_loss": loss.detach(), "val_acc": acc}
def validation_epoch_end(self, outputs):
batch_losses = [x["val_loss"] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x["val_acc"] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {"val_loss": epoch_loss.item(), "val_acc": epoch_acc.item()}
def epoch_end(self, epoch, result):
print(
"Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(
epoch, result["val_loss"], result["val_acc"]
)
)
def conv_block(in_channels, out_channels, pool=False):
layers = [
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
]
if pool:
layers.append(nn.MaxPool2d(2))
return nn.Sequential(*layers)
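# Quick shape sketch for conv_block (illustrative only; this helper is not part of the
# original training script): a pooled block halves the spatial size while remapping the
# channel count.
def _demo_conv_block_shapes():
    block = conv_block(3, 64, pool=True)
    x = torch.randn(1, 3, 64, 64)
    return block(x).shape  # torch.Size([1, 64, 32, 32])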
class ResNet9(ImageClassificationBase):
def __init__(self, in_channels, num_classes):
super().__init__()
self.conv1 = conv_block(in_channels, 64)
self.conv2 = conv_block(64, 128, pool=True) # 128*32
self.res1 = nn.Sequential(conv_block(128, 128), conv_block(128, 128))
self.conv3 = conv_block(128, 256, pool=True) # 256*16
self.conv4 = conv_block(256, 512, pool=True) # 512*8
self.res2 = nn.Sequential(conv_block(512, 512), conv_block(512, 512))
self.conv5 = conv_block(512, 1024, pool=True) # 1024*4
self.res3 = nn.Sequential(conv_block(1024, 1024), conv_block(1024, 1024))
self.classifier = nn.Sequential(
nn.MaxPool2d(4), nn.Flatten(), nn.Dropout(0.2), nn.Linear(1024, num_classes)
)
def forward(self, xb):
out = self.conv1(xb)
out = self.conv2(out)
out = self.res1(out) + out
out = self.conv3(out)
out = self.conv4(out)
out = self.res2(out) + out
out = self.conv5(out)
out = self.res3(out) + out
out = self.classifier(out)
return out
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device("cuda")
else:
return torch.device("cpu")
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader:
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
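# Hypothetical wiring sketch (the folder path and batch size are assumptions): wrap an
# ImageFolder dataset so that every batch lands on the default device automatically.
def _demo_device_dataloader(data_dir="./data/train"):
    transform = transforms.Compose([transforms.Resize((64, 64)), transforms.ToTensor()])
    dataset = ImageFolder(data_dir, transform=transform)
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    return DeviceDataLoader(loader, get_default_device())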
def preprocess(image):
transformations = transforms.Compose(
[transforms.Resize((64, 64)), transforms.ToTensor()]
)
image = transformations(image)
return image
class z:
"""Wrap a dataloader to move data to a device"""
classes = []
def __init__(self):
self.classes = ["COVID", "Lung_Opacity", "Normal", "Viral Pneumonia"]
def predict_image(img, model):
device = get_default_device()
# Convert to a batch of 1
xb = to_device(img.unsqueeze(0), device)
# xb = img.unsqueeze(0)
# Get predictions from model
yb = model(xb)
# Pick index with highest probability
prob, preds = torch.max(yb, dim=1)
print(preds)
dataset = z()
# Retrieve the class label
return dataset.classes[preds[0].item()]
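# End-to-end sketch under explicit assumptions: a trained ResNet9 checkpoint saved to
# "resnet9.pth" and an RGB chest X-ray at "sample.png" (neither file is created by this
# script), with 3 input channels and the 4 classes listed above.
def _demo_predict(image_path="sample.png", checkpoint_path="resnet9.pth"):
    device = get_default_device()
    model = to_device(ResNet9(3, 4), device)
    model.load_state_dict(torch.load(checkpoint_path, map_location=device))
    model.eval()
    img = preprocess(Image.open(image_path).convert("RGB"))
    return predict_image(img, model)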
| [
"torch.nn.Linear",
"torch.device",
"torch.nn.Dropout",
"torch.stack",
"torch.max",
"torch.nn.Sequential",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.functional.cross_entropy",
"torch.cuda.is_available",
"torch.nn.Conv2d",
"torch.nn.Flatten"
] | 1.8.1 | adityapatkar/covid-detection | 59797402bb4359d6070558d40597f7fce3958a0d |
1.7 | """
Run CGLE example using specified config file.
"""
import int.cgle as cint
import tests
import lpde
import os
import pickle
import shutil
import configparser
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import torch
from torch.utils.tensorboard import SummaryWriter
import utils_cgle
from scipy.spatial.distance import cdist
torch.set_default_dtype(torch.float32)
POINTS_W = 397.48499
plt.set_cmap('plasma')
def integrate_system(config, n, path, verbose=False, n_min=0):
"""Integrate complex Ginzburg-Landau equation."""
pars = {}
pars["c1"] = float(config["c1"])
pars["c2"] = float(config["c2"])
pars["c3"] = float(config["c3"])
pars["mu"] = float(config["mu"])
pars["L"] = float(config["L"])
data_dict = cint.integrate(pars=pars,
dt=float(config["dt"]), N=int(config["N_int"]), T=int(config["T"]),
tmin=float(config["tmin"]), tmax=float(config["tmax"]),
append_init=True)
if verbose:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data_dict["xx"], data_dict["data"][-1].real, label='real')
ax.plot(data_dict["xx"], data_dict["data"][-1].imag, label='imag')
ax.set_xlabel(r'$\omega$')
plt.title('snapshot')
plt.legend()
plt.show()
for i in range(n_min, n):
for p in [0, -1, 1]:
data_perturbed = cint.integrate(pars=pars,
dt=data_dict["dt"], N=data_dict["N"], T=data_dict["T"],
tmin=0, tmax=data_dict["tmax"]-data_dict["tmin"],
ic='manual',
Ainit=data_dict["data"][int(i*int(config["T_off"]))] +
p*float(config["eps"]) *
data_dict["data"][int(i*int(config["T_off"]))],
append_init=True)
data_perturbed["data"] = data_perturbed["data"][:, ::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["xx"] = data_perturbed["xx"][::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["N"] = int(config["N"])
output = open(path + 'run'+str(i)+'_p_'+str(p)+'.pkl', 'wb')
pickle.dump(data_perturbed, output)
output.close()
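# Hedged driver sketch (assumes a config file laid out like the one read in the __main__
# block below, with a [SYSTEM] section providing the keys used in integrate_system):
def _demo_integrate(config_path='config/config.cfg', out_dir='data/dat/'):
    cfg = configparser.ConfigParser()
    cfg.read(config_path)
    os.makedirs(out_dir, exist_ok=True)
    integrate_system(cfg["SYSTEM"], n=2, path=out_dir, verbose=False)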
def make_plot_paper(config):
"""Plot CGLE simulation results."""
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
config["TRAINING"]["n_train"]+'_p_'+str(0)+'.pkl', 'rb')
data_dict = pickle.load(pkl_file)
pkl_file.close()
# t_off = 2000
t_off = 0
idxs = np.arange(data_dict["N"])
np.random.shuffle(idxs)
fig = plt.figure(figsize=(POINTS_W/72, 0.9*POINTS_W/72))
ax1 = fig.add_subplot(321)
pl1 = ax1.pcolor(data_dict["xx"], data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax1.set_xlabel('$x$', labelpad=-2)
ax1.set_ylabel('$t$', labelpad=0)
ax1.set_xlim((0, data_dict["L"]))
ax1.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
cbar1 = plt.colorbar(pl1)
cbar1.set_label('Re $W$', labelpad=-3)
ax2 = fig.add_subplot(322)
pl2 = ax2.pcolor(np.arange(data_dict["N"]), data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10, idxs].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax2.set_xlabel('$i$', labelpad=-2)
ax2.set_ylabel('$t$', labelpad=0)
ax2.set_xlim((0, data_dict["N"]))
ax2.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
cbar2 = plt.colorbar(pl2)
cbar2.set_label('Re $W$', labelpad=-3)
ax3 = fig.add_subplot(323)
v_scaled = np.load(config["GENERAL"]["save_dir"]+'/v_scaled.npy')
pl3 = ax3.scatter(np.arange(data_dict["N"]), v_scaled[idxs], s=2, c=data_dict["xx"][idxs],
cmap='plasma')
ax3.set_xlabel('$i$', labelpad=-2)
ax3.set_xlim((0, data_dict["N"]))
ax3.set_ylabel(r'$\phi_1$', labelpad=-3)
cbar3 = plt.colorbar(pl3)
cbar3.set_label('$x$', labelpad=0)
ax4 = fig.add_subplot(324)
pl4 = ax4.pcolor(v_scaled, data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax4.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
ax4.set_xlabel(r'$\phi_1$', labelpad=0)
ax4.set_xlim((-1, 1))
ax4.set_ylabel(r'$t$', labelpad=0)
cbar4 = plt.colorbar(pl4)
cbar4.set_label('Re $W$', labelpad=-3)
dataset_train = utils_cgle.Dataset(0, int(config["TRAINING"]["n_train"]), config["MODEL"],
path=config["GENERAL"]["save_dir"])
dataset_test = utils_cgle.Dataset(int(config["TRAINING"]["n_train"]),
int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"])
dataloader_train = torch.utils.data.DataLoader(
dataset_train, batch_size=int(config["TRAINING"]['batch_size']), shuffle=True,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
dataloader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=int(config["TRAINING"]['batch_size']), shuffle=False,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
network = lpde.network.Network(config["MODEL"], n_vars=2)
model = lpde.model.Model(dataloader_train, dataloader_test, network, config["TRAINING"],
path=config["GENERAL"]["save_dir"]+'/')
model.load_network('test.model')
num_pars = sum(p.numel() for p in model.net.parameters() if p.requires_grad)
print(num_pars)
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
config["TRAINING"]["n_train"]+'_p_'+str(0)+'.pkl', 'rb')
data_unperturbed = pickle.load(pkl_file)
pkl_file.close()
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
config["TRAINING"]["n_train"]+'_p_'+str(-1)+'.pkl', 'rb')
data_perturbed_neg = pickle.load(pkl_file)
pkl_file.close()
prediction = model.integrate_svd(dataset_test, dataset_train.svd, 0, data_unperturbed["T"])
print("Calculating closest distances....")
dists_neg = cdist(np.append(data_perturbed_neg["data"].real, data_perturbed_neg["data"].imag,
axis=1), np.append(
data_unperturbed["data"].real, data_unperturbed["data"].imag, axis=1))
dists_learned = cdist(np.append(prediction[:, 0], prediction[:, 1], axis=1), np.append(
data_unperturbed["data"].real, data_unperturbed["data"].imag, axis=1))
phi_arr = np.linspace(-1, 1, data_unperturbed["N"])
t_off = 0
ax5 = fig.add_subplot(325)
pl5 = ax5.pcolor(phi_arr, data_unperturbed["tt"][::10]+t_off,
prediction[1::10, 0], vmin=-1, vmax=1,
rasterized=True)
ax5.axvline(x=(phi_arr[3]+phi_arr[4])/2, ymin=0, ymax=1, color='white', lw=1)
ax5.axvline(x=(phi_arr[-4]+phi_arr[-5])/2, ymin=0, ymax=1, color='white', lw=1)
ax5.set_xlabel(r'$\phi_1$', labelpad=0)
ax5.set_ylabel(r'$t$', labelpad=0)
ax5.set_xlim((-1, 1))
ax5.set_ylim((data_unperturbed["tmin"]+t_off, data_unperturbed["tmax"]+t_off))
cbar5 = plt.colorbar(pl5)
cbar5.set_label('Re $W$', labelpad=-3)
ax6 = fig.add_subplot(326)
ax6.plot(data_unperturbed["tt"]+t_off, np.min(dists_neg, axis=1)[:-1], label='$d$ true')
ax6.plot(data_unperturbed["tt"]+t_off, np.min(dists_learned, axis=1)
[:-1], '--', label='$d$ learned')
plt.legend()
ax6.set_xlabel('$t$', labelpad=0)
ax6.set_ylabel('$d$', labelpad=0)
# plt.subplots_adjust(top=0.94, wspace=0.35, right=0.98, bottom=0.18, left=0.08)
ax1.text(-0.25, 1., r'$\mathbf{a}$', transform=ax1.transAxes, weight='bold', fontsize=12)
ax2.text(-0.25, 1., r'$\mathbf{b}$', transform=ax2.transAxes, weight='bold', fontsize=12)
ax3.text(-0.25, 1., r'$\mathbf{c}$', transform=ax3.transAxes, weight='bold', fontsize=12)
ax4.text(-0.25, 1., r'$\mathbf{d}$', transform=ax4.transAxes, weight='bold', fontsize=12)
ax5.text(-0.25, 1., r'$\mathbf{e}$', transform=ax5.transAxes, weight='bold', fontsize=12)
ax6.text(-0.25, 1., r'$\mathbf{f}$', transform=ax6.transAxes, weight='bold', fontsize=12)
plt.subplots_adjust(top=0.96, wspace=0.35, right=0.95, bottom=0.09, hspace=0.31, left=0.08)
plt.show()
def main(config):
"""Integrate system and train model."""
verbose = config["GENERAL"].getboolean("verbose")
# Create data folders
if not os.path.exists(config["GENERAL"]["save_dir"]):
os.makedirs(config["GENERAL"]["save_dir"])
if not os.path.exists(config["GENERAL"]["save_dir"]+'/tests'):
os.makedirs(config["GENERAL"]["save_dir"]+'/tests')
# Create training and test data
if not os.path.exists(config["GENERAL"]["save_dir"]+'/dat'):
os.makedirs(config["GENERAL"]["save_dir"]+'/dat')
if config["MODEL"].getboolean("use_param"):
raise NotImplementedError
else:
integrate_system(config["SYSTEM"], int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["GENERAL"]["save_dir"]+'/dat/',
verbose=verbose)
# Create Dataset
dataset_train = utils_cgle.Dataset(0, int(config["TRAINING"]["n_train"]), config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
dataset_test = utils_cgle.Dataset(int(config["TRAINING"]["n_train"]),
int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
if config["GENERAL"].getboolean("use_dmaps"):
utils_cgle.dmaps_transform(int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]), dataset_train,
path=config["GENERAL"]["save_dir"], verbose=verbose)
dataset_train = utils_cgle.Dataset(0, int(config["TRAINING"]["n_train"]), config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
dataset_test = utils_cgle.Dataset(int(config["TRAINING"]["n_train"]),
int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
if verbose:
tests.test_perturbation(path=config["GENERAL"]["save_dir"], idx=0)
tests.test_dt(cint.f, path=config["GENERAL"]["save_dir"], idx=0)
tests.test_dataset(dataset_train, path=config["GENERAL"]["save_dir"])
        if dataset_train.svd:
tests.test_svd(dataset_train, dataset_test, path=config["GENERAL"]["save_dir"])
# Create Dataloader
dataloader_train = torch.utils.data.DataLoader(
dataset_train, batch_size=int(config["TRAINING"]['batch_size']), shuffle=True,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
dataloader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=int(config["TRAINING"]['batch_size']), shuffle=False,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
network = lpde.network.Network(config["MODEL"], n_vars=2)
delta_x = float(config["SYSTEM"]["L"])/int(config["SYSTEM"]["N"]) * \
float(config["MODEL"]["rescale_dx"])
if verbose:
tests.test_fd_coeffs(network, path=config["GENERAL"]["save_dir"])
tests.test_derivs(network, torch.tensor(dataset_train.x_data[:1],
dtype=torch.get_default_dtype()),
torch.tensor([delta_x], dtype=torch.get_default_dtype()),
path=config["GENERAL"]["save_dir"])
model = lpde.model.Model(dataloader_train, dataloader_test, network, config["TRAINING"],
path=config["GENERAL"]["save_dir"]+'/')
if not os.path.exists(config["GENERAL"]["save_dir"]+'/log'):
os.makedirs(config["GENERAL"]["save_dir"]+'/log')
else:
shutil.rmtree(config["GENERAL"]["save_dir"]+'/log')
os.makedirs(config["GENERAL"]["save_dir"]+'/log')
logger = SummaryWriter(config["GENERAL"]["save_dir"]+'/log/')
progress_bar = tqdm.tqdm(range(0, int(config["TRAINING"]['epochs'])),
total=int(config["TRAINING"]['epochs']),
leave=True, desc=lpde.utils.progress(0, 0))
if config["GENERAL"].getboolean('proceed_training'):
model.load_network('test.model')
for epoch in progress_bar:
train_loss = model.train()
val_loss = model.validate()
progress_bar.set_description(lpde.utils.progress(train_loss, val_loss))
logger.add_scalar('Loss/train', train_loss, epoch)
logger.add_scalar('Loss/val', val_loss, epoch)
logger.add_scalar('learning rate', model.optimizer.param_groups[-1]["lr"], epoch)
model.save_network('test.model')
if verbose:
model = lpde.model.Model(dataloader_train, dataloader_test, network, config["TRAINING"],
path=config["GENERAL"]["save_dir"]+'/')
model.load_network('test.model')
tests.test_learned_dt(model, dataset_test, cint.f,
path=config["GENERAL"]["save_dir"], idx=0)
tests.test_learned_dt(model, dataset_test, cint.f,
path=config["GENERAL"]["save_dir"], idx=2500)
tests.test_learned_dt(model, dataset_test, cint.f,
path=config["GENERAL"]["save_dir"], idx=4500)
_ = tests.test_integration(model, dataset_test, dataset_train.svd, 1000, 4000,
path=config["GENERAL"]["save_dir"])
tests.test_transient_dynamics(model, dataset_test, dataset_train.svd,
idx=int(config["TRAINING"]["n_train"]), t_off=0,
path=config["GENERAL"]["save_dir"])
if __name__ == "__main__":
config = configparser.ConfigParser()
config.read('config/config.cfg')
main(config)
make_plot_paper(config)
| [
"torch.get_default_dtype",
"torch.set_default_dtype",
"torch.utils.tensorboard.SummaryWriter"
] | 1.7.0 | fkemeth/emergent_pdes | d0501f21c9eb569543a19d4d95d6c91a9ccb11fe |
1.7 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
import warnings
from glob import glob
import nibabel as nib
import numpy as np
import torch
from ignite.metrics import Accuracy
from torch.utils.tensorboard import SummaryWriter
import monai
from monai.data import create_test_image_3d
from monai.engines import IterationEvents, SupervisedEvaluator, SupervisedTrainer
from monai.handlers import (
CheckpointLoader,
CheckpointSaver,
LrScheduleHandler,
MeanDice,
SegmentationSaver,
StatsHandler,
TensorBoardImageHandler,
TensorBoardStatsHandler,
ValidationHandler,
from_engine,
)
from monai.inferers import SimpleInferer, SlidingWindowInferer
from monai.transforms import (
Activationsd,
AsChannelFirstd,
AsDiscreted,
Compose,
KeepLargestConnectedComponentd,
LoadImaged,
RandCropByPosNegLabeld,
RandRotate90d,
SaveImaged,
ScaleIntensityd,
ToTensord,
)
from monai.utils import set_determinism
from monai.utils.enums import PostFix
from tests.testing_data.integration_answers import test_integration_value
from tests.utils import DistTestCase, TimedCall, pytorch_after, skip_if_quick
TASK = "integration_workflows"
def run_training_test(root_dir, device="cuda:0", amp=False, num_workers=4):
images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
train_files = [{"image": img, "label": seg} for img, seg in zip(images[:20], segs[:20])]
val_files = [{"image": img, "label": seg} for img, seg in zip(images[-20:], segs[-20:])]
# define transforms for image and segmentation
train_transforms = Compose(
[
LoadImaged(keys=["image", "label"]),
AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
ScaleIntensityd(keys=["image", "label"]),
RandCropByPosNegLabeld(
keys=["image", "label"], label_key="label", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
),
RandRotate90d(keys=["image", "label"], prob=0.5, spatial_axes=[0, 2]),
ToTensord(keys=["image", "label"]),
]
)
val_transforms = Compose(
[
LoadImaged(keys=["image", "label"]),
AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
ScaleIntensityd(keys=["image", "label"]),
ToTensord(keys=["image", "label"]),
]
)
# create a training data loader
train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
# use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=num_workers)
# create a validation data loader
val_ds = monai.data.CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0)
val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=num_workers)
# create UNet, DiceLoss and Adam optimizer
net = monai.networks.nets.UNet(
spatial_dims=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
loss = monai.losses.DiceLoss(sigmoid=True)
opt = torch.optim.Adam(net.parameters(), 1e-3)
lr_scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.1)
summary_writer = SummaryWriter(log_dir=root_dir)
val_postprocessing = Compose(
[
ToTensord(keys=["pred", "label"]),
Activationsd(keys="pred", sigmoid=True),
AsDiscreted(keys="pred", threshold=0.5),
KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
]
)
class _TestEvalIterEvents:
def attach(self, engine):
engine.add_event_handler(IterationEvents.FORWARD_COMPLETED, self._forward_completed)
def _forward_completed(self, engine):
pass
val_handlers = [
StatsHandler(iteration_log=False),
TensorBoardStatsHandler(summary_writer=summary_writer, iteration_log=False),
TensorBoardImageHandler(
log_dir=root_dir, batch_transform=from_engine(["image", "label"]), output_transform=from_engine("pred")
),
CheckpointSaver(save_dir=root_dir, save_dict={"net": net}, save_key_metric=True),
_TestEvalIterEvents(),
]
evaluator = SupervisedEvaluator(
device=device,
val_data_loader=val_loader,
network=net,
inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
postprocessing=val_postprocessing,
key_val_metric={
"val_mean_dice": MeanDice(include_background=True, output_transform=from_engine(["pred", "label"]))
},
additional_metrics={"val_acc": Accuracy(output_transform=from_engine(["pred", "label"]))},
metric_cmp_fn=lambda cur, prev: cur >= prev, # if greater or equal, treat as new best metric
val_handlers=val_handlers,
amp=bool(amp),
to_kwargs={"memory_format": torch.preserve_format},
amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32} if pytorch_after(1, 10, 0) else {},
)
train_postprocessing = Compose(
[
ToTensord(keys=["pred", "label"]),
Activationsd(keys="pred", sigmoid=True),
AsDiscreted(keys="pred", threshold=0.5),
KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
]
)
class _TestTrainIterEvents:
def attach(self, engine):
engine.add_event_handler(IterationEvents.FORWARD_COMPLETED, self._forward_completed)
engine.add_event_handler(IterationEvents.LOSS_COMPLETED, self._loss_completed)
engine.add_event_handler(IterationEvents.BACKWARD_COMPLETED, self._backward_completed)
engine.add_event_handler(IterationEvents.MODEL_COMPLETED, self._model_completed)
def _forward_completed(self, engine):
pass
def _loss_completed(self, engine):
pass
def _backward_completed(self, engine):
pass
def _model_completed(self, engine):
pass
train_handlers = [
LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
ValidationHandler(validator=evaluator, interval=2, epoch_level=True),
StatsHandler(tag_name="train_loss", output_transform=from_engine("loss", first=True)),
TensorBoardStatsHandler(
summary_writer=summary_writer, tag_name="train_loss", output_transform=from_engine("loss", first=True)
),
CheckpointSaver(save_dir=root_dir, save_dict={"net": net, "opt": opt}, save_interval=2, epoch_level=True),
_TestTrainIterEvents(),
]
trainer = SupervisedTrainer(
device=device,
max_epochs=5,
train_data_loader=train_loader,
network=net,
optimizer=opt,
loss_function=loss,
inferer=SimpleInferer(),
postprocessing=train_postprocessing,
key_train_metric={"train_acc": Accuracy(output_transform=from_engine(["pred", "label"]))},
train_handlers=train_handlers,
amp=bool(amp),
optim_set_to_none=True,
to_kwargs={"memory_format": torch.preserve_format},
amp_kwargs={"dtype": torch.float16 if bool(amp) else torch.float32} if pytorch_after(1, 10, 0) else {},
)
trainer.run()
return evaluator.state.best_metric
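# Minimal direct-call sketch (assumption: `data_dir` already holds img*.nii.gz / seg*.nii.gz
# pairs such as the synthetic ones written in IntegrationWorkflows.setUp below).
def _demo_run_training(data_dir):
    device = "cuda:0" if torch.cuda.is_available() else "cpu:0"
    return run_training_test(data_dir, device=device, amp=False)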
def run_inference_test(root_dir, model_file, device="cuda:0", amp=False, num_workers=4):
images = sorted(glob(os.path.join(root_dir, "im*.nii.gz")))
segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
val_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]
# define transforms for image and segmentation
val_transforms = Compose(
[
LoadImaged(keys=["image", "label"]),
AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
ScaleIntensityd(keys=["image", "label"]),
ToTensord(keys=["image", "label"]),
]
)
# create a validation data loader
val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=num_workers)
# create UNet, DiceLoss and Adam optimizer
net = monai.networks.nets.UNet(
spatial_dims=3,
in_channels=1,
out_channels=1,
channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2),
num_res_units=2,
).to(device)
val_postprocessing = Compose(
[
ToTensord(keys=["pred", "label"]),
Activationsd(keys="pred", sigmoid=True),
AsDiscreted(keys="pred", threshold=0.5),
KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
# test the case that `pred` in `engine.state.output`, while `image_meta_dict` in `engine.state.batch`
SaveImaged(
keys="pred", meta_keys=PostFix.meta("image"), output_dir=root_dir, output_postfix="seg_transform"
),
]
)
val_handlers = [
StatsHandler(iteration_log=False),
CheckpointLoader(load_path=f"{model_file}", load_dict={"net": net}),
SegmentationSaver(
output_dir=root_dir,
output_postfix="seg_handler",
batch_transform=from_engine(PostFix.meta("image")),
output_transform=from_engine("pred"),
),
]
evaluator = SupervisedEvaluator(
device=device,
val_data_loader=val_loader,
network=net,
inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
postprocessing=val_postprocessing,
key_val_metric={
"val_mean_dice": MeanDice(include_background=True, output_transform=from_engine(["pred", "label"]))
},
additional_metrics={"val_acc": Accuracy(output_transform=from_engine(["pred", "label"]))},
val_handlers=val_handlers,
amp=bool(amp),
)
evaluator.run()
return evaluator.state.best_metric
@skip_if_quick
class IntegrationWorkflows(DistTestCase):
def setUp(self):
set_determinism(seed=0)
self.data_dir = tempfile.mkdtemp()
for i in range(40):
im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"img{i:d}.nii.gz"))
n = nib.Nifti1Image(seg, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"seg{i:d}.nii.gz"))
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0")
monai.config.print_config()
def tearDown(self):
set_determinism(seed=None)
shutil.rmtree(self.data_dir)
def train_and_infer(self, idx=0):
results = []
set_determinism(seed=0)
best_metric = run_training_test(self.data_dir, device=self.device, amp=(idx == 2))
model_file = sorted(glob(os.path.join(self.data_dir, "net_key_metric*.pt")))[-1]
infer_metric = run_inference_test(self.data_dir, model_file, device=self.device, amp=(idx == 2))
print("best metric", best_metric)
print("infer metric", infer_metric)
if idx == 2:
self.assertTrue(test_integration_value(TASK, key="best_metric_2", data=best_metric, rtol=1e-2))
else:
self.assertTrue(test_integration_value(TASK, key="best_metric", data=best_metric, rtol=1e-2))
# check inference properties
if idx == 2:
self.assertTrue(test_integration_value(TASK, key="infer_metric_2", data=infer_metric, rtol=1e-2))
else:
self.assertTrue(test_integration_value(TASK, key="infer_metric", data=infer_metric, rtol=1e-2))
results.append(best_metric)
results.append(infer_metric)
def _test_saved_files(postfix):
output_files = sorted(glob(os.path.join(self.data_dir, "img*", f"*{postfix}.nii.gz")))
values = []
for output in output_files:
ave = np.mean(nib.load(output).get_fdata())
values.append(ave)
if idx == 2:
self.assertTrue(test_integration_value(TASK, key="output_sums_2", data=values, rtol=1e-2))
else:
self.assertTrue(test_integration_value(TASK, key="output_sums", data=values, rtol=1e-2))
_test_saved_files(postfix="seg_handler")
_test_saved_files(postfix="seg_transform")
try:
os.remove(model_file)
except Exception as e:
warnings.warn(f"Fail to remove {model_file}: {e}.")
if torch.cuda.is_available():
try:
torch.cuda.empty_cache()
except Exception:
pass
return results
def test_training(self):
repeated = []
test_rounds = 3
for i in range(test_rounds):
results = self.train_and_infer(idx=i)
repeated.append(results)
np.testing.assert_allclose(repeated[0], repeated[1])
@TimedCall(seconds=300, skip_timing=not torch.cuda.is_available(), daemon=False)
def test_timing(self):
self.train_and_infer(idx=2)
if __name__ == "__main__":
unittest.main()
| [
"torch.optim.lr_scheduler.StepLR",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter"
] | 1.7 | diazandr3s/MONAI | 209db9e08129855df878634639d4c2700d9acd83 |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
import math
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, gelu
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"roberta-base",
"roberta-large",
"roberta-large-mnli",
"distilroberta-base",
"roberta-base-openai-detector",
"roberta-large-openai-detector",
# See all RoBERTa models at https://huggingface.co/models?filter=roberta
]
class RobertaEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
persistent=False,
)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
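# Worked example of the padding-aware indexing above (illustrative, using RoBERTa's default
# padding_idx of 1): for input_ids [[0, 31414, 232, 2, 1, 1]] the position ids become
# [[2, 3, 4, 5, 1, 1]] -- real tokens count up from padding_idx + 1 and padded slots keep
# the padding index, which is the "tiny tweak" over BertEmbeddings mentioned above.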
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Roberta
class RobertaSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
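    # Shape note: transpose_for_scores reshapes (batch, seq_len, all_head_size) into
    # (batch, num_attention_heads, seq_len, attention_head_size) so that the per-head
    # attention scores below can be computed with a single batched matmul.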
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class RobertaSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
class RobertaAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = RobertaSelfAttention(config)
self.output = RobertaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class RobertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class RobertaOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Roberta
class RobertaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RobertaAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = RobertaAttention(config)
self.intermediate = RobertaIntermediate(config)
self.output = RobertaOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Roberta
class RobertaEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([RobertaLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class RobertaPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class RobertaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RobertaConfig
base_model_prefix = "roberta"
supports_gradient_checkpointing = True
# Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, RobertaEncoder):
module.gradient_checkpointing = value
def update_keys_to_ignore(self, config, del_keys_to_ignore):
"""Remove some keys from ignore list"""
if not config.tie_word_embeddings:
# must make a new list, or the class variable gets modified!
self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore]
self._keys_to_ignore_on_load_missing = [
k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore
]
ROBERTA_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.RobertaTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING,
)
class RobertaModel(RobertaPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need`_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
.. _`Attention is all you need`: https://arxiv.org/abs/1706.03762
"""
_keys_to_ignore_on_load_missing = [r"position_ids"]
# Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Roberta
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = RobertaEmbeddings(config)
self.encoder = RobertaEncoder(config)
self.pooler = RobertaPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
# Copied from transformers.models.bert.modeling_bert.BertModel.forward
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
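# A minimal, illustrative usage sketch for RobertaModel (given as comments; it assumes the public
# "roberta-base" checkpoint and RobertaTokenizer are available):
#
#   from transformers import RobertaTokenizer, RobertaModel
#   tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#   model = RobertaModel.from_pretrained("roberta-base")
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   outputs = model(**inputs)
#   outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)
#   outputs.pooler_output.shape      # (batch_size, hidden_size) when add_pooling_layer=True (the default)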
@add_start_docstrings(
"""RoBERTa Model with a `language modeling` head on top for CLM fine-tuning. """, ROBERTA_START_DOCSTRING
)
class RobertaForCausalLM(RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `RobertaLMHeadModel` as a standalone, add `is_decoder=True.`")
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
Returns:
Example::
>>> from transformers import RobertaTokenizer, RobertaForCausalLM, RobertaConfig
>>> import torch
>>> tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
>>> config = RobertaConfig.from_pretrained("roberta-base")
>>> config.is_decoder = True
>>> model = RobertaForCausalLM.from_pretrained('roberta-base', config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
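# Note on the causal LM loss above: predictions at position t are scored against the token at position
# t + 1, which is why the prediction scores are truncated to [:, :-1, :] and the labels shifted to
# [:, 1:] before CrossEntropyLoss; labels equal to -100 are ignored as usual.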
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING)
class RobertaForMaskedLM(RobertaPreTrainedModel):
_keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"]
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.lm_head = RobertaLMHead(config)
# The LM head weights require special treatment only when they are tied with the word embeddings
self.update_keys_to_ignore(config, ["lm_head.decoder.weight"])
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<mask>",
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
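# Shape walk-through for RobertaLMHead (illustrative; the concrete sizes assume roberta-base, i.e.
# hidden_size=768 and vocab_size=50265):
#   features (batch, seq_len, 768) -> dense -> gelu -> layer_norm -> (batch, seq_len, 768)
#   decoder  (batch, seq_len, 768) -> (batch, seq_len, 50265) logits over the vocabulary
# The decoder weight is normally tied to the input word embeddings; only the bias is head-specific.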
@add_start_docstrings(
"""
RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForSequenceClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.classifier = RobertaClassificationHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
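# Summary of how `problem_type` is resolved above when labels are provided:
#   num_labels == 1                          -> "regression"                  -> MSELoss
#   num_labels > 1 and integer labels        -> "single_label_classification" -> CrossEntropyLoss
#   otherwise (e.g. float multi-hot labels)  -> "multi_label_classification"  -> BCEWithLogitsLoss
# For example (assumed shapes): num_labels=3 with labels of dtype torch.long and shape (batch,) uses
# CrossEntropyLoss on logits of shape (batch, 3); float labels of shape (batch, 3) use BCEWithLogitsLoss.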
@add_start_docstrings(
"""
Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForMultipleChoice(RobertaPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
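# Multiple-choice shape handling above, sketched with assumed sizes batch=2, num_choices=4, seq_len=16:
#   input_ids (2, 4, 16) -> flat_input_ids (8, 16)   # choices are folded into the batch dimension
#   pooled_output (8, hidden_size) -> logits (8, 1) -> reshaped_logits (2, 4)
#   labels (2,) with values in [0, 3]; CrossEntropyLoss selects the correct choice per example.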
@add_start_docstrings(
"""
Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForTokenClassification(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
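# Token-classification loss above: when an attention_mask is given, labels at padded positions are
# replaced with loss_fct.ignore_index (-100 by default) so that padding never contributes to the loss.
# Illustrative example (assumed values): labels [3, 7, 2, 0, 0] with attention_mask [1, 1, 1, 0, 0]
# become effective labels [3, 7, 2, -100, -100].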
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROBERTA_START_DOCSTRING,
)
class RobertaForQuestionAnswering(RobertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
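# Span-extraction loss above: start/end positions falling outside the model input are clamped to
# `ignored_index` (the sequence length), which is also passed as `ignore_index` to CrossEntropyLoss so
# such spans contribute nothing; the reported loss is the average of the start and end losses.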
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
    Args:
        input_ids (torch.Tensor): Tensor of input token ids.
        padding_idx (int): Id of the padding token.
        past_key_values_length (int): Offset added to the position numbers when cached past key values are used.
    Returns: torch.Tensor of position ids with the same shape as :obj:`input_ids`
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
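# Worked example for create_position_ids_from_input_ids (values are illustrative; padding_idx=1 is the
# usual RoBERTa convention and is assumed here, with past_key_values_length=0):
#   input_ids              = [[ 0, 31414, 232, 1, 1]]
#   mask = input_ids.ne(1) = [[ 1,     1,   1, 0, 0]]
#   cumsum(mask) * mask    = [[ 1,     2,   3, 0, 0]]
#   + padding_idx          = [[ 2,     3,   4, 1, 1]]
# i.e. real tokens receive positions starting at padding_idx + 1 while padding keeps position padding_idx.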
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.Softmax",
"torch.einsum",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.tensor",
"torch.nn.BCEWithLogitsLoss",
"torch.tanh",
"torch.matmul",
"torch.nn.Embedding",
"torch.cumsum"
] | 1.0 | djroxx2000/transformers | 77770ec79883343d32051cfb6a04f64523cd8df1 |
1.0 | # coding=utf-8
# Copyright 2020 Microsoft and the Hugging Face Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DeBERTa model. """
import math
from collections.abc import Sequence
import torch
from torch import _softmax_backward_data, nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutput,
MaskedLMOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_deberta import DebertaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "DebertaConfig"
_TOKENIZER_FOR_DOC = "DebertaTokenizer"
_CHECKPOINT_FOR_DOC = "microsoft/deberta-base"
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/deberta-base",
"microsoft/deberta-large",
"microsoft/deberta-xlarge",
"microsoft/deberta-base-mnli",
"microsoft/deberta-large-mnli",
"microsoft/deberta-xlarge-mnli",
]
class ContextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size)
self.dropout = StableDropout(config.pooler_dropout)
self.config = config
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
context_token = hidden_states[:, 0]
context_token = self.dropout(context_token)
pooled_output = self.dense(context_token)
pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output)
return pooled_output
@property
def output_dim(self):
return self.config.hidden_size
class XSoftmax(torch.autograd.Function):
"""
Masked Softmax which is optimized for saving memory
Args:
input (:obj:`torch.tensor`): The input tensor that will apply softmax.
        mask (:obj:`torch.IntTensor`): The mask matrix where 0 indicates that the element will be ignored in the softmax calculation.
dim (int): The dimension that will apply softmax
Example::
>>> import torch
>>> from transformers.models.deberta.modeling_deberta import XSoftmax
>>> # Make a tensor
>>> x = torch.randn([4,20,100])
>>> # Create a mask
>>> mask = (x>0).int()
>>> y = XSoftmax.apply(x, mask, dim=-1)
"""
@staticmethod
def forward(self, input, mask, dim):
self.dim = dim
rmask = ~(mask.bool())
output = input.masked_fill(rmask, float("-inf"))
output = torch.softmax(output, self.dim)
output.masked_fill_(rmask, 0)
self.save_for_backward(output)
return output
@staticmethod
def backward(self, grad_output):
(output,) = self.saved_tensors
inputGrad = _softmax_backward_data(grad_output, output, self.dim, output)
return inputGrad, None, None
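# XSoftmax applies the mask by filling masked positions with -inf before the softmax and with 0 afterwards,
# and only the softmax output is saved for backward (the gradient is computed via
# torch._softmax_backward_data), rather than carrying a separate large-negative additive mask tensor
# through the graph.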
class DropoutContext(object):
def __init__(self):
self.dropout = 0
self.mask = None
self.scale = 1
self.reuse_mask = True
def get_mask(input, local_context):
if not isinstance(local_context, DropoutContext):
dropout = local_context
mask = None
else:
dropout = local_context.dropout
dropout *= local_context.scale
mask = local_context.mask if local_context.reuse_mask else None
if dropout > 0 and mask is None:
mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool()
if isinstance(local_context, DropoutContext):
if local_context.mask is None:
local_context.mask = mask
return mask, dropout
class XDropout(torch.autograd.Function):
"""Optimized dropout function to save computation and memory by using mask operation instead of multiplication."""
@staticmethod
def forward(ctx, input, local_ctx):
mask, dropout = get_mask(input, local_ctx)
ctx.scale = 1.0 / (1 - dropout)
if dropout > 0:
ctx.save_for_backward(mask)
return input.masked_fill(mask, 0) * ctx.scale
else:
return input
@staticmethod
def backward(ctx, grad_output):
if ctx.scale > 1:
(mask,) = ctx.saved_tensors
return grad_output.masked_fill(mask, 0) * ctx.scale, None
else:
return grad_output, None
class StableDropout(nn.Module):
"""
Optimized dropout module for stabilizing the training
Args:
        drop_prob (float): the dropout probability
"""
def __init__(self, drop_prob):
super().__init__()
self.drop_prob = drop_prob
self.count = 0
self.context_stack = None
def forward(self, x):
"""
Call the module
Args:
x (:obj:`torch.tensor`): The input tensor to apply dropout
"""
if self.training and self.drop_prob > 0:
return XDropout.apply(x, self.get_context())
return x
def clear_context(self):
self.count = 0
self.context_stack = None
def init_context(self, reuse_mask=True, scale=1):
if self.context_stack is None:
self.context_stack = []
self.count = 0
for c in self.context_stack:
c.reuse_mask = reuse_mask
c.scale = scale
def get_context(self):
if self.context_stack is not None:
if self.count >= len(self.context_stack):
self.context_stack.append(DropoutContext())
ctx = self.context_stack[self.count]
ctx.dropout = self.drop_prob
self.count += 1
return ctx
else:
return self.drop_prob
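# XDropout/StableDropout implement inverted dropout with an explicit mask: surviving activations are
# scaled by 1 / (1 - p) during training so no rescaling is needed at inference time, and a DropoutContext
# created through init_context() allows the same mask to be reused across calls when reuse_mask is True.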
class DebertaLayerNorm(nn.Module):
"""LayerNorm module in the TF style (epsilon inside the square root)."""
def __init__(self, size, eps=1e-12):
super().__init__()
self.weight = nn.Parameter(torch.ones(size))
self.bias = nn.Parameter(torch.zeros(size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_type = hidden_states.dtype
hidden_states = hidden_states.float()
mean = hidden_states.mean(-1, keepdim=True)
variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True)
hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon)
hidden_states = hidden_states.to(input_type)
y = self.weight * hidden_states + self.bias
return y
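# DebertaLayerNorm computes, in float32 for numerical stability,
#   y = weight * (x - mean(x)) / sqrt(var(x) + eps) + bias
# with mean and variance taken over the last (hidden) dimension (the TF-style placement of eps inside the
# square root), and then casts the result back to the input dtype.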
class DebertaSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class DebertaAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = DisentangledSelfAttention(config)
self.output = DebertaSelfOutput(config)
self.config = config
def forward(
self,
hidden_states,
attention_mask,
return_att=False,
query_states=None,
relative_pos=None,
rel_embeddings=None,
):
self_output = self.self(
hidden_states,
attention_mask,
return_att,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
if return_att:
self_output, att_matrix = self_output
if query_states is None:
query_states = hidden_states
attention_output = self.output(self_output, query_states)
if return_att:
return (attention_output, att_matrix)
else:
return attention_output
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta
class DebertaIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class DebertaOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class DebertaLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = DebertaAttention(config)
self.intermediate = DebertaIntermediate(config)
self.output = DebertaOutput(config)
def forward(
self,
hidden_states,
attention_mask,
return_att=False,
query_states=None,
relative_pos=None,
rel_embeddings=None,
):
attention_output = self.attention(
hidden_states,
attention_mask,
return_att=return_att,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
if return_att:
attention_output, att_matrix = attention_output
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if return_att:
return (layer_output, att_matrix)
else:
return layer_output
class DebertaEncoder(nn.Module):
"""Modified BertEncoder with relative position bias support"""
def __init__(self, config):
super().__init__()
self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)])
self.relative_attention = getattr(config, "relative_attention", False)
if self.relative_attention:
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size)
def get_rel_embedding(self):
rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None
return rel_embeddings
def get_attention_mask(self, attention_mask):
if attention_mask.dim() <= 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1)
attention_mask = attention_mask.byte()
elif attention_mask.dim() == 3:
attention_mask = attention_mask.unsqueeze(1)
return attention_mask
def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
if self.relative_attention and relative_pos is None:
q = query_states.size(-2) if query_states is not None else hidden_states.size(-2)
relative_pos = build_relative_position(q, hidden_states.size(-2), hidden_states.device)
return relative_pos
def forward(
self,
hidden_states,
attention_mask,
output_hidden_states=True,
output_attentions=False,
query_states=None,
relative_pos=None,
return_dict=True,
):
attention_mask = self.get_attention_mask(attention_mask)
relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if isinstance(hidden_states, Sequence):
next_kv = hidden_states[0]
else:
next_kv = hidden_states
rel_embeddings = self.get_rel_embedding()
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = layer_module(
next_kv,
attention_mask,
output_attentions,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
if output_attentions:
hidden_states, att_m = hidden_states
if query_states is not None:
query_states = hidden_states
if isinstance(hidden_states, Sequence):
next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
else:
next_kv = hidden_states
if output_attentions:
all_attentions = all_attentions + (att_m,)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
def build_relative_position(query_size, key_size, device):
"""
Build relative position according to the query and key
    We assume the absolute position of the query :math:`P_q` is in the range (0, query_size) and the absolute position
    of the key :math:`P_k` is in the range (0, key_size). The relative position from query to key is
    :math:`R_{q \\rightarrow k} = P_q - P_k`
Args:
query_size (int): the length of query
key_size (int): the length of key
Return:
:obj:`torch.LongTensor`: A tensor with shape [1, query_size, key_size]
"""
q_ids = torch.arange(query_size, dtype=torch.long, device=device)
k_ids = torch.arange(key_size, dtype=torch.long, device=device)
rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1)
rel_pos_ids = rel_pos_ids[:query_size, :]
rel_pos_ids = rel_pos_ids.unsqueeze(0)
return rel_pos_ids
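# Worked example for build_relative_position (illustrative sizes):
#   build_relative_position(query_size=3, key_size=4, device) has shape [1, 3, 4] and equals
#     [[[ 0, -1, -2, -3],
#       [ 1,  0, -1, -2],
#       [ 2,  1,  0, -1]]]
#   i.e. entry [0, q, k] = q - k, the signed distance from query position q to key position k.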
@torch.jit.script
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)])
@torch.jit.script
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)])
@torch.jit.script
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2)))
class DisentangledSelfAttention(nn.Module):
"""
Disentangled self-attention module
Parameters:
        config (:class:`~transformers.DebertaConfig`):
            A model config class instance with the configuration used to build a new model. The schema is similar to
            `BertConfig`; for more details, please refer to :class:`~transformers.DebertaConfig`
"""
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False)
self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float))
self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
self.relative_attention = getattr(config, "relative_attention", False)
self.talking_head = getattr(config, "talking_head", False)
if self.talking_head:
self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False)
if self.relative_attention:
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.pos_dropout = StableDropout(config.hidden_dropout_prob)
if "c2p" in self.pos_att_type or "p2p" in self.pos_att_type:
self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
if "p2c" in self.pos_att_type or "p2p" in self.pos_att_type:
self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = StableDropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask,
return_att=False,
query_states=None,
relative_pos=None,
rel_embeddings=None,
):
"""
Call the module
Args:
hidden_states (:obj:`torch.FloatTensor`):
                Input states to the module, usually the output from the previous layer; it will be the Q, K and V in
                `Attention(Q,K,V)`
            attention_mask (:obj:`torch.ByteTensor`):
                An attention mask matrix of shape [`B`, `N`, `N`] where `B` is the batch size and `N` is the maximum
                sequence length, in which element [i,j] = `1` means the `i`-th token in the input can attend to the
                `j`-th token.
            return_att (:obj:`bool`, optional):
                Whether to return the attention matrix.
query_states (:obj:`torch.FloatTensor`, optional):
The `Q` state in `Attention(Q,K,V)`.
relative_pos (:obj:`torch.LongTensor`):
The relative position encoding between the tokens in the sequence. It's of shape [`B`, `N`, `N`] with
values ranging in [`-max_relative_positions`, `max_relative_positions`].
rel_embeddings (:obj:`torch.FloatTensor`):
The embedding of relative distances. It's a tensor of shape [:math:`2 \\times
\\text{max_relative_positions}`, `hidden_size`].
"""
if query_states is None:
qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1)
query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)
else:
def linear(w, b, x):
if b is not None:
return torch.matmul(x, w.t()) + b.t()
else:
return torch.matmul(x, w.t()) # + b.t()
ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)
qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]
qkvb = [None] * 3
q = linear(qkvw[0], qkvb[0], query_states)
k, v = [linear(qkvw[i], qkvb[i], hidden_states) for i in range(1, 3)]
query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]
query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])
rel_att = None
# Take the dot product between "query" and "key" to get the raw attention scores.
scale_factor = 1 + len(self.pos_att_type)
scale = math.sqrt(query_layer.size(-1) * scale_factor)
query_layer = query_layer / scale
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.relative_attention:
rel_embeddings = self.pos_dropout(rel_embeddings)
rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
if rel_att is not None:
attention_scores = attention_scores + rel_att
# bxhxlxd
if self.talking_head:
attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)
attention_probs = self.dropout(attention_probs)
if self.talking_head:
attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (-1,)
context_layer = context_layer.view(*new_context_layer_shape)
if return_att:
return (context_layer, attention_probs)
else:
return context_layer
def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
if relative_pos is None:
q = query_layer.size(-2)
relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device)
if relative_pos.dim() == 2:
relative_pos = relative_pos.unsqueeze(0).unsqueeze(0)
elif relative_pos.dim() == 3:
relative_pos = relative_pos.unsqueeze(1)
# bxhxqxk
elif relative_pos.dim() != 4:
raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {relative_pos.dim()}")
att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions)
relative_pos = relative_pos.long().to(query_layer.device)
rel_embeddings = rel_embeddings[
self.max_relative_positions - att_span : self.max_relative_positions + att_span, :
].unsqueeze(0)
if "c2p" in self.pos_att_type or "p2p" in self.pos_att_type:
pos_key_layer = self.pos_proj(rel_embeddings)
pos_key_layer = self.transpose_for_scores(pos_key_layer)
if "p2c" in self.pos_att_type or "p2p" in self.pos_att_type:
pos_query_layer = self.pos_q_proj(rel_embeddings)
pos_query_layer = self.transpose_for_scores(pos_query_layer)
score = 0
# content->position
if "c2p" in self.pos_att_type:
c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2))
c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1)
c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos))
score += c2p_att
# position->content
if "p2c" in self.pos_att_type or "p2p" in self.pos_att_type:
pos_query_layer /= math.sqrt(pos_query_layer.size(-1) * scale_factor)
if query_layer.size(-2) != key_layer.size(-2):
r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device)
else:
r_pos = relative_pos
p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1)
if query_layer.size(-2) != key_layer.size(-2):
pos_index = relative_pos[:, :, :, 0].unsqueeze(-1)
if "p2c" in self.pos_att_type:
p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2))
p2c_att = torch.gather(
p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer)
).transpose(-1, -2)
if query_layer.size(-2) != key_layer.size(-2):
p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer))
score += p2c_att
return score
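# Sketch of the disentangled attention assembled above (a summary following the DeBERTa paper, with d the
# per-head size and delta(i, j) the clamped relative distance from i to j):
#   A[i, j] ~ Qc_i . Kc_j^T                      content -> content  (computed in forward())
#           + Qc_i . Kr_{delta(i, j)}^T          "c2p", content -> position, if enabled
#           + Kc_j . Qr_{delta(j, i)}^T          "p2c", position -> content, if enabled
# with every term divided by sqrt(d * scale_factor), where scale_factor = 1 + len(pos_att_type)
# (e.g. sqrt(3 * d) when both "c2p" and "p2c" are active).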
class DebertaEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
pad_token_id = getattr(config, "pad_token_id", 0)
self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id)
self.position_biased_input = getattr(config, "position_biased_input", True)
if not self.position_biased_input:
self.position_embeddings = None
else:
self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size)
if config.type_vocab_size > 0:
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size)
if self.embedding_size != config.hidden_size:
self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False)
self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)
self.dropout = StableDropout(config.hidden_dropout_prob)
self.config = config
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if self.position_embeddings is not None:
position_embeddings = self.position_embeddings(position_ids.long())
else:
position_embeddings = torch.zeros_like(inputs_embeds)
embeddings = inputs_embeds
if self.position_biased_input:
embeddings += position_embeddings
if self.config.type_vocab_size > 0:
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings += token_type_embeddings
if self.embedding_size != self.config.hidden_size:
embeddings = self.embed_proj(embeddings)
embeddings = self.LayerNorm(embeddings)
if mask is not None:
if mask.dim() != embeddings.dim():
if mask.dim() == 4:
mask = mask.squeeze(1).squeeze(1)
mask = mask.unsqueeze(2)
mask = mask.to(embeddings.dtype)
embeddings = embeddings * mask
embeddings = self.dropout(embeddings)
return embeddings
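# DebertaEmbeddings only adds absolute position embeddings to the token embeddings when
# `position_biased_input` is True; otherwise positions typically reach the model only through the relative
# attention bias in DisentangledSelfAttention. The attention mask is also applied to the embeddings here,
# so padded positions are zeroed before the first encoder layer.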
class DebertaPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DebertaConfig
base_model_prefix = "deberta"
_keys_to_ignore_on_load_missing = ["position_ids"]
_keys_to_ignore_on_load_unexpected = ["position_embeddings"]
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
DEBERTA_START_DOCSTRING = r"""
The DeBERTa model was proposed in `DeBERTa: Decoding-enhanced BERT with Disentangled Attention
    <https://arxiv.org/abs/2006.03654>`_ by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It is built on top of
    BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two
    improvements, it outperforms BERT/RoBERTa on a majority of tasks using 80GB of pretraining data.
    This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
    subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to
    general usage and behavior.
Parameters:
config (:class:`~transformers.DebertaConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
DEBERTA_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.DebertaTokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
DEBERTA_START_DOCSTRING,
)
class DebertaModel(DebertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = DebertaEmbeddings(config)
self.encoder = DebertaEncoder(config)
self.z_steps = 0
self.config = config
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError("The prune function is not implemented in DeBERTa model.")
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
token_type_ids=token_type_ids,
position_ids=position_ids,
mask=attention_mask,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask,
output_hidden_states=True,
output_attentions=output_attentions,
return_dict=return_dict,
)
encoded_layers = encoder_outputs[1]
if self.z_steps > 1:
hidden_states = encoded_layers[-2]
layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
query_states = encoded_layers[-1]
rel_embeddings = self.encoder.get_rel_embedding()
attention_mask = self.encoder.get_attention_mask(attention_mask)
rel_pos = self.encoder.get_rel_pos(embedding_output)
for layer in layers[1:]:
query_states = layer(
hidden_states,
attention_mask,
return_att=False,
query_states=query_states,
relative_pos=rel_pos,
rel_embeddings=rel_embeddings,
)
encoded_layers.append(query_states)
sequence_output = encoded_layers[-1]
if not return_dict:
return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
return BaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top. """, DEBERTA_START_DOCSTRING)
class DebertaForMaskedLM(DebertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
self.deberta = DebertaModel(config)
self.cls = DebertaOnlyMLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# copied from transformers.models.bert.BertPredictionHeadTransform with bert -> deberta
class DebertaPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# copied from transformers.models.bert.BertLMPredictionHead with bert -> deberta
class DebertaLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = DebertaPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# copied from transformers.models.bert.BertOnlyMLMHead with bert -> deberta
class DebertaOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = DebertaLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
@add_start_docstrings(
"""
DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
DEBERTA_START_DOCSTRING,
)
class DebertaForSequenceClassification(DebertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
num_labels = getattr(config, "num_labels", 2)
self.num_labels = num_labels
self.deberta = DebertaModel(config)
self.pooler = ContextPooler(config)
output_dim = self.pooler.output_dim
self.classifier = nn.Linear(output_dim, num_labels)
drop_out = getattr(config, "cls_dropout", None)
drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
self.dropout = StableDropout(drop_out)
self.init_weights()
def get_input_embeddings(self):
return self.deberta.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
self.deberta.set_input_embeddings(new_embeddings)
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
encoder_layer = outputs[0]
pooled_output = self.pooler(encoder_layer)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.num_labels == 1:
# regression task
loss_fn = nn.MSELoss()
logits = logits.view(-1).to(labels.dtype)
loss = loss_fn(logits, labels.view(-1))
elif labels.dim() == 1 or labels.size(-1) == 1:
label_index = (labels >= 0).nonzero()
labels = labels.long()
if label_index.size(0) > 0:
labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1)))
labels = torch.gather(labels, 0, label_index.view(-1))
loss_fct = CrossEntropyLoss()
loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1))
else:
loss = torch.tensor(0).to(logits)
else:
log_softmax = nn.LogSoftmax(-1)
loss = -((log_softmax(logits) * labels).sum(-1)).mean()
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
else:
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
DEBERTA_START_DOCSTRING,
)
class DebertaForTokenClassification(DebertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.deberta = DebertaModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
DEBERTA_START_DOCSTRING,
)
class DebertaForQuestionAnswering(DebertaPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.deberta = DebertaModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| [
"torch.nn.Linear",
"torch._softmax_backward_data",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.zeros",
"torch.sqrt",
"torch.arange",
"torch.nn.MSELoss",
"torch.nn.LogSoftmax",
"torch.nn.CrossEntropyLoss",
"torch.softmax",
"torch.ones",
"torch.clamp",
"torch.tensor",
"torch.zeros_like",
"torch.matmul",
"torch.nn.Embedding",
"torch.empty_like"
] | 1.0 | djroxx2000/transformers | 76cadb7943c8492ec481f4f3925e9e8793a32c9d |
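A minimal usage sketch for the classification head defined in this record, assuming the `transformers` package is installed and the public `microsoft/deberta-base` checkpoint is reachable (the checkpoint name and example sentence are illustrative, not taken from the record):

import torch
from transformers import DebertaTokenizer, DebertaForSequenceClassification

# Illustrative only: any DeBERTa checkpoint with a sequence-classification head works here.
tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaForSequenceClassification.from_pretrained("microsoft/deberta-base")

inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
outputs = model(**inputs, labels=torch.tensor([1]))   # default head has 2 labels
print(outputs.loss.item(), outputs.logits.shape)      # scalar loss, torch.Size([1, 2])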
1.4 | """
discriminator model
"""
import torch
import torch.nn as nn
import torchvision.models as models
import json
from easydict import EasyDict as edict
from graphs.weights_initializer import weights_init
class EncoderModel(nn.Module):
def __init__(self,config):
super(EncoderModel, self).__init__()
self.config = config
self.num_classes = self.config.num_classes
self.progress = 0.0
self.encoder = nn.Sequential(
nn.Conv2d(in_channels=3,out_channels=32, kernel_size=3, stride=1, padding=1), # b, 32, 224, 224
nn.ReLU(True),
nn.MaxPool2d(2, stride=None), # b, 32, 112, 112
nn.Conv2d(in_channels=32,out_channels=64, kernel_size=3, stride=1, padding=1), # b, 64, 112, 112
nn.ReLU(True),
nn.MaxPool2d(2, stride=None), # b, 64, 56, 56
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1), # b, 128, 56, 56
nn.ReLU(True),
nn.MaxPool2d(2, stride=None), # b, 128, 28, 28
)
self.linear_layers = nn.Sequential(
nn.Linear(2*self.config.image_size*self.config.image_size, out_features=128),
nn.Linear(128, out_features=self.config.num_ways),
)
def forward(self, x):
#x = self.encoder(x)
#print(x.size())
#self.discriminator = nn.Sequential(self.encoder, self.fc())
x = self.encoder(x)
x = x.view(x.size(0), -1)
x = self.linear_layers(x)
#print(x.size())
#x = x.view(1, -1)
#x = self.fc(x)
return x
class ConceptDiscriminatorModel(torch.nn.Module): #new model
def __init__(self, pretrained_model):
super(ConceptDiscriminatorModel, self).__init__()
self.new_model = nn.Sequential(
nn.Linear(in_features=512, out_features=30))
self.pretrained_model = pretrained_model
def forward(self, x):
        x = self.pretrained_model(x)
        x = self.new_model(x)  # map the 512-d backbone features to the 30 concept logits
        return x
| [
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d"
] | 1.4.0 | suvarnak/GenerativeFSLCovid | 0bdeb4ed444c5c9d59697c71d0733fc3a100944c |
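A quick smoke test for `EncoderModel`, assuming the class above (together with its repo-specific imports) can be imported; the config values are illustrative and chosen so the flattened feature size matches the first linear layer:

import torch
from easydict import EasyDict as edict

# Illustrative config, not taken from the repo's experiment files.
config = edict({"num_classes": 5, "image_size": 224, "num_ways": 5})
model = EncoderModel(config)

x = torch.randn(2, 3, 224, 224)   # two RGB images at the 224x224 size the layer comments assume
logits = model(x)
print(logits.shape)               # torch.Size([2, 5]) -> one score per "way"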
1.9 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utils for generating stats from torch tensors.
"""
from typing import Iterator, List, Tuple, Union
import numpy as np
import torch
from torch.functional import F
def calc_sample_norms(
named_params: Iterator[Tuple[str, torch.Tensor]], flat: bool = True
) -> List[torch.Tensor]:
r"""
Calculates the norm of the given tensors for each sample.
This function calculates the overall norm of the given tensors for each sample,
    assuming that dimension 0 of each tensor is the batch dimension.
Args:
named_params: An iterator of tuples <name, param> with name being a
string and param being a tensor of shape ``[B, ...]`` where ``B``
is the size of the batch and is the 0th dimension.
        flat: A flag; when set to ``True``, returns a single flat norm over all
            layers' norms
Example:
>>> t1 = torch.rand((2, 5))
>>> t2 = torch.rand((2, 5))
>>> calc_sample_norms([("1", t1), ("2", t2)])
[tensor([1.5117, 1.0618])]
Returns:
A list of tensor norms where length of the list is the number of layers
"""
norms = [param.view(len(param), -1).norm(2, dim=-1) for name, param in named_params]
# calc norm over all layer norms if flat = True
if flat:
norms = [torch.stack(norms, dim=0).norm(2, dim=0)]
return norms
def sum_over_all_but_batch_and_last_n(
tensor: torch.Tensor, n_dims: int
) -> torch.Tensor:
r"""
Calculates the sum over all dimensions, except the first
(batch dimension), and excluding the last n_dims.
This function will ignore the first dimension and it will
not aggregate over the last n_dims dimensions.
Args:
tensor: An input tensor of shape ``(B, ..., X[n_dims-1])``.
n_dims: Number of dimensions to keep.
Example:
>>> tensor = torch.ones(1, 2, 3, 4, 5)
>>> sum_over_all_but_batch_and_last_n(tensor, n_dims=2).shape
torch.Size([1, 4, 5])
Returns:
A tensor of shape ``(B, ..., X[n_dims-1])``
"""
if tensor.dim() == n_dims + 1:
return tensor
else:
dims = list(range(1, tensor.dim() - n_dims))
return tensor.sum(dim=dims)
def unfold3d(
tensor: torch.Tensor,
kernel_size: Union[int, Tuple[int, int, int]],
padding: Union[int, Tuple[int, int, int]] = 0,
stride: Union[int, Tuple[int, int, int]] = 1,
dilation: Union[int, Tuple[int, int, int]] = 1,
):
r"""
    Extracts sliding local blocks from a batched input tensor.
:class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors).
This method implements the same action for 5D inputs
Args:
tensor: An input tensor of shape ``(B, C, D, H, W)``.
kernel_size: the size of the sliding blocks
padding: implicit zero padding to be added on both sides of input
stride: the stride of the sliding blocks in the input spatial dimensions
dilation: the spacing between the kernel points.
Example:
>>> B, C, D, H, W = 3, 4, 5, 6, 7
>>> tensor = torch.arange(1,B*C*D*H*W+1.).view(B,C,D,H,W)
>>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape
torch.Size([3, 32, 120])
Returns:
        A tensor of shape ``(B, C * np.prod(kernel_size), L)``, where L is the number of output spatial locations.
See :class:`torch.nn.Unfold` for more details
"""
    if len(tensor.shape) != 5:
        raise ValueError(
            f"Input tensor must be of the shape [B, C, D, H, W]. Got {tensor.shape}"
        )
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size, kernel_size)
if isinstance(padding, int):
padding = (padding, padding, padding)
if isinstance(stride, int):
stride = (stride, stride, stride)
if isinstance(dilation, int):
dilation = (dilation, dilation, dilation)
if dilation != (1, 1, 1):
raise NotImplementedError(f"dilation={dilation} not supported. We'd love a PR!")
batch_size, channels, _, _, _ = tensor.shape
# Input shape: (B, C, D, H, W)
tensor = F.pad(
tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0])
)
# Output shape: (B, C, D+2*padding[2], H+2*padding[1], W+2*padding[0])
tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0])
tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1])
tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2])
# Output shape: (B, C, D_out, H_out, W_out, kernel_size[0], kernel_size[1], kernel_size[2])
# For D_out, H_out, W_out definitions see :class:`torch.nn.Unfold`
tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7)
# Output shape: (B, D_out, H_out, W_out, C, kernel_size[0], kernel_size[1], kernel_size[2])
tensor = tensor.reshape(batch_size, -1, channels * np.prod(kernel_size)).transpose(
1, 2
)
# Output shape: (B, D_out * H_out * W_out, C * kernel_size[0] * kernel_size[1] * kernel_size[2]
return tensor
| [
"torch.functional.F.pad",
"torch.stack"
] | 1.9.0 | nhsx-mirror/SynthVAE | 64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651 |
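The helpers above can be sanity-checked in isolation; this snippet simply reproduces the shapes promised by the docstrings, using toy inputs of my own:

import torch

# Assumes calc_sample_norms, sum_over_all_but_batch_and_last_n and unfold3d
# from the snippet above are importable.
t1, t2 = torch.rand(2, 5), torch.rand(2, 5)
norms = calc_sample_norms([("layer1", t1), ("layer2", t2)])
print(norms[0].shape)   # torch.Size([2]) - one flat norm per sample

x = torch.ones(1, 2, 3, 4, 5)
print(sum_over_all_but_batch_and_last_n(x, n_dims=2).shape)   # torch.Size([1, 4, 5])

vol = torch.arange(1.0, 3 * 4 * 5 * 6 * 7 + 1).view(3, 4, 5, 6, 7)
print(unfold3d(vol, kernel_size=2).shape)   # torch.Size([3, 32, 120])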
1.9 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, Union
import torch.nn as nn
from torch import Tensor
from torch.nn.modules.module import _IncompatibleKeys
def filter_out_old_keys(self, state_dict, prefix, local_metadata):
new_state_dict = {
param_name: param_value
for param_name, param_value in state_dict.items()
if param_name not in self.old_to_new
}
return new_state_dict
class ParamRenamedModule(nn.Module):
"""
This class defines a nn.Module whose parameters are renamed. This is useful when you want to
reimplement a layer but make sure its state_dict and list of parameters are exactly the same
as another reference layer so that you can have a drop-in replacement that does not depend on
how your layer is actually implemented. In Opacus, this is used for DPLSTM, where our
implementation leverages submodules and requires alignment to the state_dict of nn.LSTM.
"""
def __init__(self, rename_map: Dict[str, str]):
"""
Initializes internal state. Subclass this instead of ``torch.nn.Module`` whenever you need
to rename your model's state.
Args:
rename_map: mapping from old name -> new name for each parameter you want renamed.
Note that this must be a 1:1 mapping!
"""
super().__init__()
self.old_to_new = rename_map
self.new_to_old = {v: k for k, v in rename_map.items()}
self._register_state_dict_hook(filter_out_old_keys)
def _register_renamed_parameters(self):
"""
Internal function. This function simply registers parameters under their new name. They will
automatically mask their duplicates coming from submodules. This trick works because
self.parameters() proceeds recursively from the top, going into submodules after processing
items at the current level, and will not return duplicates.
"""
for old_name, param in super().named_parameters():
if old_name in self.old_to_new:
new_name = self.old_to_new[old_name]
self.register_parameter(new_name, param)
def __setattr__(self, name: str, value: Union[Tensor, nn.Module]) -> None:
"""
Whenever you set an attribute, eg `self.linear`, this is called to actually register it in
any nn.Module. We rely on the masking trick explained in the docs for
``_register_renamed_parameters`` to make sure we replace things only once. If a new parameter
in the rename list is detected, we rename and mask it so next time this is called we will
no longer find it.
"""
super().__setattr__(name, value)
try:
self._register_renamed_parameters()
except AttributeError:
# At the very beginning of instantiation, this will fail because we do not yet have
# self._parameters. Safe to ignore.
pass
def load_state_dict(
self, state_dict: Dict[str, Tensor], strict: bool = True,
):
"""
Identical to ``torch.nn.Module.load_state_dict()`` but handles the renamed keys.
"""
# nn.Module recomputes its state_dict(), without calling the same logic as in self.state_dict()
# This means that it will find both the old and the renamed parameters. Both point to the
# same parameter object, so either of them will set it correctly. It will however complain
# that some keys are missing (the "old" keys). We can safely ignore those and process them
# accordingly
missing_keys, unexpected_keys = super().load_state_dict(
state_dict, strict=False
)
missing_keys = [k for k in missing_keys if k not in self.old_to_new]
if strict:
error_msgs = []
if len(unexpected_keys) > 0:
error_msgs.insert(
0,
"Unexpected key(s) in state_dict: {}. ".format(
", ".join('"{}"'.format(k) for k in unexpected_keys)
),
)
if len(missing_keys) > 0:
error_msgs.insert(
0,
"Missing key(s) in state_dict: {}. ".format(
", ".join('"{}"'.format(k) for k in missing_keys)
),
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
self.__class__.__name__, "\n\t".join(error_msgs)
)
)
return _IncompatibleKeys(missing_keys, unexpected_keys)
| [
"torch.nn.modules.module._IncompatibleKeys"
] | 1.9.0 | nhsx-mirror/SynthVAE | 64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651 |
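A toy illustration (my own, not from the Opacus codebase) of what the renaming buys you, assuming `ParamRenamedModule` as defined above is importable: the wrapped `nn.Linear` parameters surface under flat names in both `named_parameters()` and `state_dict()`:

import torch.nn as nn

class RenamedLinear(ParamRenamedModule):
    def __init__(self):
        # Expose the submodule's parameters as top-level "weight"/"bias".
        super().__init__(rename_map={"linear.weight": "weight", "linear.bias": "bias"})
        self.linear = nn.Linear(3, 2)

    def forward(self, x):
        return self.linear(x)

m = RenamedLinear()
print(sorted(n for n, _ in m.named_parameters()))   # ['bias', 'weight']
print(sorted(m.state_dict().keys()))                # ['bias', 'weight']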
1.9 | #%% -------- Import Libraries -------- #
# Standard imports
import numpy as np
import pandas as pd
import torch
# VAE is in other folder
import sys
sys.path.append("../")
# Opacus support for differential privacy
from opacus.utils.uniform_sampler import UniformWithReplacementSampler
# For VAE dataset formatting
from torch.utils.data import TensorDataset, DataLoader
# VAE functions
from VAE import Decoder, Encoder, VAE
# For datetime columns we need a transformer
from rdt.transformers import datetime
# Utility file contains all functions required to run notebook
from utils import (
set_seed,
mimic_pre_proc,
constraint_filtering,
plot_elbo,
plot_likelihood_breakdown,
plot_variable_distributions,
reverse_transformers,
)
from metrics import distribution_metrics
import optuna
import pickle
import warnings
warnings.filterwarnings("ignore") # We suppress warnings to avoid SDMETRICS throwing unique synthetic data warnings (i.e.
# data in synthetic set is not in the real data set) as well as SKLEARN throwing convergence warnings (pre-processing uses
# GMM from sklearn and this throws non convergence warnings)
set_seed(0)
filepath = ".../Private MIMIC Data/table_one_synthvae.csv"
# Load in the MIMIC dataset
data_supp = pd.read_csv(filepath)
# Save the original columns
original_categorical_columns = [
"ETHNICITY",
"DISCHARGE_LOCATION",
"GENDER",
"FIRST_CAREUNIT",
"VALUEUOM",
"LABEL",
]
original_continuous_columns = ["SUBJECT_ID", "VALUE", "age"]
original_datetime_columns = ["ADMITTIME", "DISCHTIME", "DOB", "CHARTTIME"]
# Drop DOD column as it contains NANS - for now
# data_supp = data_supp.drop('DOD', axis = 1)
original_columns = (
original_categorical_columns
+ original_continuous_columns
+ original_datetime_columns
)
#%% -------- Data Pre-Processing -------- #
pre_proc_method = "GMM"
(
x_train,
original_metric_set,
reordered_dataframe_columns,
continuous_transformers,
categorical_transformers,
datetime_transformers,
num_categories,
num_continuous,
) = mimic_pre_proc(data_supp=data_supp, pre_proc_method=pre_proc_method)
#%% -------- Create & Train VAE -------- #
# User defined parameters
# General training
batch_size = 32
n_epochs = 5
logging_freq = 1 # Number of epochs we should log the results to the user
patience = 5 # How many epochs should we allow the model train to see if
# improvement is made
delta = 10 # The difference between elbo values that registers an improvement
filepath = None # Where to save the best model
# Privacy params
differential_privacy = False # Do we want to implement differential privacy
sample_rate = 0.1 # Sampling rate
noise_scale = None # Noise multiplier - influences how much noise to add
target_eps = 1 # Target epsilon for privacy accountant
target_delta = 1e-5 # Target delta for privacy accountant
# Define the metrics you want the model to evaluate
# Define distributional metrics required - for sdv_baselines this is set by default
distributional_metrics = [
"SVCDetection",
"GMLogLikelihood",
"CSTest",
"KSTest",
"KSTestExtended",
"ContinuousKLDivergence",
"DiscreteKLDivergence",
]
gower = False
# Prepare data for interaction with torch VAE
Y = torch.Tensor(x_train)
dataset = TensorDataset(Y)
generator = None
sample_rate = batch_size / len(dataset)
data_loader = DataLoader(
dataset,
batch_sampler=UniformWithReplacementSampler(
num_samples=len(dataset), sample_rate=sample_rate, generator=generator
),
pin_memory=True,
generator=generator,
)
# -------- Define our Optuna trial -------- #
def objective(
trial,
gower,
distributional_metrics,
differential_privacy=False,
target_delta=1e-3,
target_eps=10.0,
n_epochs=50,
):
latent_dim = trial.suggest_int("Latent Dimension", 2, 128, step=2) # Hyperparam
hidden_dim = trial.suggest_int("Hidden Dimension", 32, 1024, step=32) # Hyperparam
encoder = Encoder(x_train.shape[1], latent_dim, hidden_dim=hidden_dim)
decoder = Decoder(latent_dim, num_continuous, num_categories=num_categories)
    lr = trial.suggest_float("Learning Rate", 1e-3, 1e-2, step=1e-5)  # Hyperparam
    vae = VAE(encoder, decoder, lr=lr)  # use the sampled learning rate
C = trial.suggest_int("C", 10, 1e4, step=50)
if differential_privacy == True:
(
training_epochs,
log_elbo,
log_reconstruction,
log_divergence,
log_categorical,
log_numerical,
) = vae.diff_priv_train(
data_loader,
n_epochs=n_epochs,
C=C, # Hyperparam
target_eps=target_eps,
target_delta=target_delta,
sample_rate=sample_rate,
)
print(f"(epsilon, delta): {vae.get_privacy_spent(target_delta)}")
else:
(
training_epochs,
log_elbo,
log_reconstruction,
log_divergence,
log_categorical,
log_numerical,
) = vae.train(data_loader, n_epochs=n_epochs)
# -------- Generate Synthetic Data -------- #
synthetic_supp = constraint_filtering(
n_rows=data_supp.shape[0],
vae=vae,
reordered_cols=reordered_dataframe_columns,
data_supp_columns=data_supp.columns,
cont_transformers=continuous_transformers,
cat_transformers=categorical_transformers,
date_transformers=datetime_transformers,
pre_proc_method=pre_proc_method,
)
# -------- Datetime Handling -------- #
# If the dataset has datetimes then we need to re-convert these to a numerical
# Value representing seconds, this is so we can evaluate the metrics on them
metric_synthetic_supp = synthetic_supp.copy()
for index, column in enumerate(original_datetime_columns):
# Fit datetime transformer - converts to seconds
temp_datetime = datetime.DatetimeTransformer()
temp_datetime.fit(metric_synthetic_supp, columns=column)
metric_synthetic_supp = temp_datetime.transform(metric_synthetic_supp)
# -------- SDV Metrics -------- #
# Calculate the sdv metrics for SynthVAE
metrics = distribution_metrics(
gower_bool=gower,
distributional_metrics=distributional_metrics,
data_supp=data_supp,
synthetic_supp=synthetic_supp,
categorical_columns=original_categorical_columns,
continuous_columns=original_continuous_columns,
saving_filepath=None,
pre_proc_method=pre_proc_method,
)
# Optuna wants a list of values in float form
list_metrics = [metrics[i] for i in metrics.columns]
print(list_metrics)
return list_metrics
#%% -------- Run Hyperparam Optimisation -------- #
# If there is no study object in your folder then run and save the study so
# It can be resumed if needed
first_run = True # First run indicates if we are creating a new hyperparam study
if first_run == True:
if gower == True:
        directions = ["maximize" for i in range(len(distributional_metrics) + 1)]
else:
        directions = ["maximize" for i in range(len(distributional_metrics))]
study = optuna.create_study(directions=directions)
else:
with open("no_dp_MIMIC.pkl", "rb") as f:
study = pickle.load(f)
study.optimize(
lambda trial: objective(
trial,
gower=gower,
distributional_metrics=distributional_metrics,
differential_privacy=differential_privacy,
target_delta=target_delta,
target_eps=target_eps,
n_epochs=n_epochs,
),
n_trials=3,
gc_after_trial=True,
) # GC to avoid OOM
#%%
study.best_trials
#%% -------- Save The Study -------- #
# For a multi objective study we need to find the best trials and basically
# average between the 3 metrics to get the best trial
with open("no_dp_MIMIC.pkl", "wb") as f:
pickle.dump(study, f)
| [
"torch.Tensor",
"torch.utils.data.TensorDataset"
] | 1.9.0 | nhsx-mirror/SynthVAE | 64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651 |
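The study above is multi-objective (one `maximize` direction per metric, plus one for Gower distance when enabled). A stand-alone sketch of that Optuna pattern with a dummy two-metric objective, not the real SynthVAE metrics:

import optuna

def dummy_objective(trial):
    # Two toy scores standing in for the distributional metrics.
    x = trial.suggest_float("x", -5.0, 5.0)
    y = trial.suggest_float("y", -5.0, 5.0)
    return -(x ** 2), -((y - 1.0) ** 2)

study = optuna.create_study(directions=["maximize", "maximize"])
study.optimize(dummy_objective, n_trials=10, gc_after_trial=True)
print(len(study.best_trials))   # Pareto-optimal trials rather than a single best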
1.3 | __all__ = ["EvaluatingInferencer"]
from dataclasses import dataclass
from typing import Sequence
import torch
import torch.utils.data as td
import utils
from datasets import BatchData
from .inferencer import Inferencer
from evaluators import FinegrainedEvaluator
@dataclass
class EvaluatingInferencer(Inferencer):
evaluators: Sequence[FinegrainedEvaluator] = tuple()
_requires_lexical_form: bool = utils.private_field(default=False)
def __post_init__(self):
super().__post_init__()
self._requires_lexical_form = any(e.requires_lexical_form
for e in self.evaluators)
def on_run_started(self, dataloader: td.DataLoader) -> td.DataLoader:
dataloader = super().on_run_started(dataloader)
for evaluator in self.evaluators:
evaluator.reset()
return dataloader
def on_batch_ended(self, batch: BatchData, pred: BatchData, outputs
) -> utils.TensorMap:
stats = dict(super().on_batch_ended(batch, pred, outputs))
batch_lex, pred_lex = None, None
if self._requires_lexical_form:
batch_lex = list(map(self.processor.lexicalize_global, batch))
pred_lex = list(map(self.processor.lexicalize_global, pred))
with torch.no_grad():
for evaluator in self.evaluators:
if evaluator.requires_lexical_form:
eval_stats = evaluator.update(batch_lex, pred_lex, outputs)
else:
eval_stats = evaluator.update(batch, pred, outputs)
stats.update(eval_stats or dict())
return stats
def on_run_ended(self, stats: utils.TensorMap) -> utils.TensorMap:
stats = dict(super().on_run_ended(stats))
with torch.no_grad():
for evaluator in self.evaluators:
stats.update(evaluator.get() or dict())
return stats
| [
"torch.no_grad"
] | 1.3.0 | kaniblu/vhda | 35941097ef552568c29f66cc55d8ce1927f34978 |
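The inferencer only relies on each evaluator exposing a `reset` / `update` / `get` lifecycle plus a `requires_lexical_form` flag. A toy evaluator following that contract, written from scratch here rather than taken from the repo:

import torch

class MeanLossEvaluator:
    requires_lexical_form = False

    def reset(self):
        self.total, self.count = 0.0, 0

    def update(self, batch, pred, outputs):
        self.total += float(outputs["loss"])
        self.count += 1
        return {}   # per-batch stats (none here)

    def get(self):
        return {"mean_loss": torch.tensor(self.total / max(self.count, 1))}

ev = MeanLossEvaluator()
ev.reset()
ev.update(None, None, {"loss": 2.0})
ev.update(None, None, {"loss": 4.0})
print(ev.get())   # {'mean_loss': tensor(3.)}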
1.8 | import pytorch_lightning as pl
from torch.utils.data import DataLoader
class plDataModule(pl.LightningDataModule):
def __init__(
self,
train_dataset,
val_dataset,
test_dataset=None,
num_workers=2,
train_sampler=None,
train_shuffle=True,
train_batch_size=64,
train_drop_last=False,
val_batch_size=16,
val_shuffle=False,
val_sampler=None,
train_dataloader=None,
val_dataloader=None,
test_dataloader=None,
):
super().__init__()
self.train_dataset = train_dataset
self.val_dataset = val_dataset
self.test_dataset = test_dataset
self.num_workers = num_workers
self.train_sampler = train_sampler
self.train_shuffle = train_shuffle
self.train_batch_size = train_batch_size
self.train_drop_last = train_drop_last
self.val_batch_size = val_batch_size
self.val_shuffle = val_shuffle
self.val_sampler = val_sampler
self.created_train_dataloader = train_dataloader
self.created_val_dataloader = val_dataloader
self.created_test_dataloader = test_dataloader
def train_dataloader(self):
if self.created_train_dataloader:
return self.created_train_dataloader
return DataLoader(
self.train_dataset,
batch_size=self.train_batch_size,
sampler=self.train_sampler,
drop_last=self.train_drop_last,
num_workers=self.num_workers,
shuffle=self.train_shuffle if not self.train_sampler else False,
)
def val_dataloader(self):
if self.created_val_dataloader:
return self.created_val_dataloader
return DataLoader(
self.val_dataset,
batch_size=self.val_batch_size,
sampler=self.val_sampler,
drop_last=False,
num_workers=self.num_workers,
shuffle=self.val_shuffle if not self.val_sampler else False,
)
def test_dataloader(self):
if self.created_test_dataloader:
return self.created_test_dataloader
if self.test_dataset:
return DataLoader(
self.test_dataset,
batch_size=self.val_batch_size,
sampler=self.val_sampler,
drop_last=False,
num_workers=self.num_workers,
shuffle=self.val_shuffle if not self.val_sampler else False,
)
| [
"torch.utils.data.DataLoader"
] | 1.8.1 | Yongtae723/88_face | 7a761cb277be2a28984161be1e7ae2b73cadf085 |
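A minimal usage sketch with synthetic tensors (the real project wires in its own datasets and samplers):

import torch
from torch.utils.data import TensorDataset

# Toy classification data, purely for the shape check below.
train_ds = TensorDataset(torch.randn(128, 10), torch.randint(0, 2, (128,)))
val_ds = TensorDataset(torch.randn(32, 10), torch.randint(0, 2, (32,)))

dm = plDataModule(train_ds, val_ds, num_workers=0, train_batch_size=16)
xb, yb = next(iter(dm.train_dataloader()))
print(xb.shape, yb.shape)   # torch.Size([16, 10]) torch.Size([16])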
1.0 | from torch import nn
from torch.optim import Adam
from mask_generators import ImageMaskGenerator, DropoutMaskGenerator
from nn_utils import ResBlock, MemoryLayer, SkipConnection
from prob_utils import normal_parse_params, GaussianLoss
# sampler from the model generative distribution
# here we return mean of the Gaussian to avoid white noise
def sampler(params):
return normal_parse_params(params).mean
def optimizer(parameters):
return Adam(parameters, lr=2e-4)
batch_size = 16
reconstruction_log_prob = GaussianLoss()
mask_generator = DropoutMaskGenerator(rate=0.9)
# improve train computational stability by dividing the loss
# by this scale factor right before backpropagation
vlb_scale_factor = 28 ** 2
class StupidLayer(nn.Module):
def __init__(self):
super(StupidLayer, self).__init__()
def forward(self,x):
return x[:,:,2:-2,2:-2]
def MLPBlock(dim):
return SkipConnection(
nn.BatchNorm2d(dim),
nn.LeakyReLU(),
nn.Conv2d(dim, dim, 1)
)
proposal_network = nn.Sequential(
nn.Conv2d(2, 8, 1,padding=2), #28,28,8
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),
nn.AvgPool2d(2, 2), # 16, 16,8
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),
nn.AvgPool2d(2, 2), nn.Conv2d(8, 16, 1), # 8, 8, 16
ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), # 8,8, 16?
nn.AvgPool2d(2, 2), nn.Conv2d(16, 32, 1), # 4, 4, 32
ResBlock(32, 16), ResBlock(32, 16),
ResBlock(32, 16), ResBlock(32, 16),
nn.AvgPool2d(2, 2), nn.Conv2d(32, 64, 1), # 2,2 64
ResBlock(64, 32), ResBlock(64, 32),
ResBlock(64, 32), ResBlock(64, 32),
nn.AvgPool2d(2, 2), nn.Conv2d(64, 128, 1),
MLPBlock(128), MLPBlock(128), MLPBlock(128), MLPBlock(128),
)
prior_network = nn.Sequential(
MemoryLayer('#0'),
nn.Conv2d(2, 8, 1, padding=2), # 28,28,8
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),
MemoryLayer('#1'),
nn.AvgPool2d(2, 2),# 16,16,8
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),
MemoryLayer('#2'),
nn.AvgPool2d(2, 2), nn.Conv2d(8, 16, 1),# 8,8,16
ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8),
MemoryLayer('#3'),
nn.AvgPool2d(2, 2), nn.Conv2d(16, 32, 1), # 4,4 ,32
ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16),
MemoryLayer('#4'),
nn.AvgPool2d(2, 2), nn.Conv2d(32, 64, 1), #2,2 64
ResBlock(64, 32), ResBlock(64, 32),
ResBlock(64, 32), ResBlock(64, 32),
MemoryLayer('#5'),
nn.AvgPool2d(2, 2), nn.Conv2d(64, 128, 1), #1,1,128
MLPBlock(128), MLPBlock(128), MLPBlock(128), MLPBlock(128),
)
generative_network = nn.Sequential(
nn.Conv2d(64, 64, 1),
MLPBlock(64), MLPBlock(64), MLPBlock(64), MLPBlock(64),
nn.Conv2d(64, 32, 1), nn.Upsample(scale_factor=2),
# MemoryLayer('#7', True), nn.Conv2d(384, 128, 1),
# ResBlock(128, 64), ResBlock(128, 64),
# ResBlock(128, 64), ResBlock(128, 64),
# nn.Conv2d(128, 64, 1), nn.Upsample(scale_factor=2),
# MemoryLayer('#6', True), nn.Conv2d(192, 64, 1),
# ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32), ResBlock(64, 32),
# nn.Conv2d(64, 32, 1), nn.Upsample(scale_factor=2),
MemoryLayer('#5', True), nn.Conv2d(96, 32, 1),
ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16), ResBlock(32, 16),
nn.Conv2d(32, 16, 1), nn.Upsample(scale_factor=2),
MemoryLayer('#4', True), nn.Conv2d(48, 16, 1),
ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8), ResBlock(16, 8),
nn.Conv2d(16, 8, 1), nn.Upsample(scale_factor=2),
MemoryLayer('#3', True), nn.Conv2d(24, 8, 1),
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),
nn.Upsample(scale_factor=2),
MemoryLayer('#2', True), nn.Conv2d(16, 8, 1),
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),
nn.Upsample(scale_factor=2), #32,32,8
# nn.Conv2dTranspose(8,8,stride=2,padding=1)
MemoryLayer('#1', True), nn.Conv2d(16, 8, 1),
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),
StupidLayer(),
MemoryLayer('#0', True), nn.Conv2d(10, 8, 1),
ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8), ResBlock(8, 8),
nn.Conv2d(8, 2, 1),
)
| [
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.optim.Adam",
"torch.nn.LeakyReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d"
] | 1.0.1 | HugoSenetaire/vaeac | 451d34dd4986c52f2f37c508f03ee3db9e7408d3 |
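`MLPBlock` above leans on the repo's `SkipConnection` and `ResBlock` helpers, which are not part of this record. A minimal re-implementation of the residual idea (my own sketch, not the repo's `nn_utils`):

import torch
import torch.nn as nn

class SkipConnection(nn.Module):
    """Apply the wrapped layers and add the input back (a residual connection)."""

    def __init__(self, *layers):
        super().__init__()
        self.body = nn.Sequential(*layers)

    def forward(self, x):
        return x + self.body(x)

block = SkipConnection(nn.BatchNorm2d(8), nn.LeakyReLU(), nn.Conv2d(8, 8, 1))
x = torch.randn(4, 8, 16, 16)
print(block(x).shape)   # torch.Size([4, 8, 16, 16]) - shape is preserved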
1.3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
# Northwestern Polytechnical University (Pengcheng Guo)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""ConvolutionModule definition."""
from torch import nn
class ConvolutionModule(nn.Module):
"""ConvolutionModule in Conformer model.
Args:
channels (int): The number of channels of conv layers.
        kernel_size (int): Kernel size of conv layers.
"""
    def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):
        """Construct a ConvolutionModule object."""
super(ConvolutionModule, self).__init__()
        # kernel_size should be an odd number for 'SAME' padding
assert (kernel_size - 1) % 2 == 0
self.pointwise_conv1 = nn.Conv1d(
channels,
2 * channels,
kernel_size=1,
stride=1,
padding=0,
bias=bias,
)
self.depthwise_conv = nn.Conv1d(
channels,
channels,
kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
groups=channels,
bias=bias,
)
self.norm = nn.BatchNorm1d(channels)
self.pointwise_conv2 = nn.Conv1d(
channels,
channels,
kernel_size=1,
stride=1,
padding=0,
bias=bias,
)
self.activation = activation
def forward(self, x):
"""Compute convolution module.
Args:
x (torch.Tensor): Input tensor (#batch, time, channels).
Returns:
torch.Tensor: Output tensor (#batch, time, channels).
"""
# exchange the temporal dimension and the feature dimension
x = x.transpose(1, 2)
# GLU mechanism
x = self.pointwise_conv1(x) # (batch, 2*channel, dim)
x = nn.functional.glu(x, dim=1) # (batch, channel, dim)
# 1D Depthwise Conv
x = self.depthwise_conv(x)
x = self.activation(self.norm(x))
x = self.pointwise_conv2(x)
return x.transpose(1, 2)
| [
"torch.nn.ReLU",
"torch.nn.functional.glu",
"torch.nn.BatchNorm1d",
"torch.nn.Conv1d"
] | 1.3.0 | A-Quarter-Mile/Muskits | 60d80727d2ec6b8ec405502d67796e8df319ea82 |
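A quick shape check for the module above, assuming `ConvolutionModule` can be imported as defined (random features, illustrative sizes):

import torch

conv = ConvolutionModule(channels=64, kernel_size=15)   # 15 is odd, as the assert requires
x = torch.randn(8, 100, 64)   # (batch, time, channels)
print(conv(x).shape)          # torch.Size([8, 100, 64]) - time and channel dims preserved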
1.5 | """ Implemenation of uncertainty-aware option selection
"""
from abc import ABC, abstractmethod
from typing import Tuple
import torch
from torch import BoolTensor, LongTensor, Tensor
from torch.distributions import Categorical
from rainy.net.policy import BernoulliPolicy
def _debug_minmax(name: str, t: Tensor) -> None:
print(f"{name}: {t.max().item()}, {t.min().item()}")
class OptionSelectImpl(ABC):
worker_indices: Tensor
EPS = 0.001
INF = 1e9
@abstractmethod
def logmu_weight(self) -> float:
pass
def _logmu(self, qo: Tensor, logmu_o_xs: Tensor) -> Tensor:
return qo - self.logmu_weight() * logmu_o_xs
def _eval_sample_options(
self, qo: Tensor, beta: BernoulliPolicy,
) -> Tuple[LongTensor, BoolTensor]:
"""Sample options by ε-Greedy
"""
batch_size = qo.size(0)
prev_options = self.eval_prev_options[:batch_size]
current_beta = beta[self.worker_indices[:batch_size], prev_options]
opt_terminals = current_beta.action().bool()
use_new_options = self.eval_is_initial_states[:batch_size] | opt_terminals
new_options = self.eval_opt_explorer.select_from_value(qo, same_device=True)
options = torch.where(use_new_options, new_options, prev_options)
return options, use_new_options
def _sample_options(
self, qo: Tensor, beta: BernoulliPolicy, mu_o_xs: Categorical,
) -> Tuple[LongTensor, BoolTensor]:
"""
Select new options.
        Returns the selected options and a BoolTensor indicating which options terminated.
"""
masks = self.storage.masks[-1]
prev_options = self.prev_options
current_beta = beta[self.worker_indices[: qo.size(0)], prev_options]
opt_terminals = current_beta.action().bool()
use_new_options = (1.0 - masks).bool() | opt_terminals
# mask out current options
opt_mask = torch.zeros_like(qo)
opt_mask[self.worker_indices, prev_options] += opt_terminals * -self.INF
if self.config.option_selector == "epsg":
new_options = self.opt_explorer.select_from_value(
qo + opt_mask, same_device=True
)
elif self.config.option_selector == "logp":
new_options = self._logmu(qo + opt_mask, mu_o_xs.logits).argmax(-1)
elif self.config.option_selector == "epsg-logp":
value = self._logmu(qo + opt_mask, mu_o_xs.logits)
new_options = self.opt_explorer.select_from_value(value, same_device=True)
else:
            raise NotImplementedError(
                f"Invalid option selector {self.config.option_selector}"
            )
self.option_counter[new_options[use_new_options].cpu().numpy()] += 1
options = torch.where(use_new_options, new_options, prev_options)
return options, opt_terminals
| [
"torch.zeros_like",
"torch.where"
] | 1.5.0 | kngwyu/infomax-option-critic | 9d907c041c1d0280db9b23eb2fdf9e0033e33bf3 |
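A stand-alone sketch of the core selection step used above: mask the just-terminated option with a large negative value, take the argmax, and keep the previous option everywhere else. Toy tensors only; it omits the ε-greedy explorer and the log-μ weighting the class layers on top:

import torch

INF = 1e9
qo = torch.tensor([[1.0, 3.0, 2.0],
                   [5.0, 0.5, 0.2]])          # option values for two workers
prev_options = torch.tensor([1, 0])
opt_terminals = torch.tensor([True, False])   # worker 0's option just ended

opt_mask = torch.zeros_like(qo)
worker_idx = torch.arange(qo.size(0))
opt_mask[worker_idx, prev_options] += opt_terminals * -INF   # forbid re-picking it
new_options = (qo + opt_mask).argmax(-1)
options = torch.where(opt_terminals, new_options, prev_options)
print(options)   # tensor([2, 0])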
1.0 | from __future__ import absolute_import, division, print_function, unicode_literals
import six
import logging
from collections import OrderedDict
import numpy as np
import time
import torch
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torch.nn.utils import clip_grad_norm_
logger = logging.getLogger(__name__)
class EarlyStoppingException(Exception):
pass
class NanException(Exception):
pass
class NumpyDataset(Dataset):
""" Dataset for numpy arrays with explicit memmap support """
def __init__(self, *arrays, **kwargs):
self.dtype = kwargs.get("dtype", torch.float)
self.memmap = []
self.data = []
self.n = None
for array in arrays:
if self.n is None:
self.n = array.shape[0]
assert array.shape[0] == self.n
if isinstance(array, np.memmap):
self.memmap.append(True)
self.data.append(array)
else:
self.memmap.append(False)
tensor = torch.from_numpy(array).to(self.dtype)
self.data.append(tensor)
def __getitem__(self, index):
items = []
for memmap, array in zip(self.memmap, self.data):
if memmap:
tensor = np.array(array[index])
items.append(torch.from_numpy(tensor).to(self.dtype))
else:
items.append(array[index])
return tuple(items)
def __len__(self):
return self.n
class Trainer(object):
""" Trainer class. Any subclass has to implement the forward_pass() function. """
def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):
self._init_timer()
self._timer(start="ALL")
self._timer(start="initialize model")
self.model = model
self.run_on_gpu = run_on_gpu and torch.cuda.is_available()
self.device = torch.device("cuda" if self.run_on_gpu else "cpu")
self.dtype = torch.double if double_precision else torch.float
self.n_workers = n_workers
self.model = self.model.to(self.device, self.dtype)
logger.info(
"Training on %s with %s precision",
"GPU" if self.run_on_gpu else "CPU",
"double" if double_precision else "single",
)
self._timer(stop="initialize model")
self._timer(stop="ALL")
def train(
self,
data,
loss_functions,
loss_weights=None,
loss_labels=None,
epochs=50,
batch_size=100,
optimizer=optim.Adam,
optimizer_kwargs=None,
initial_lr=0.001,
final_lr=0.0001,
data_val=None,
validation_split=0.25,
early_stopping=True,
early_stopping_patience=None,
clip_gradient=None,
verbose="some",
):
self._timer(start="ALL")
self._timer(start="check data")
logger.debug("Initialising training data")
self.check_data(data)
self.report_data(data)
if data_val is not None:
logger.debug("Found external validation data set")
self.check_data(data_val)
self.report_data(data_val)
self._timer(stop="check data", start="make dataset")
data_labels, dataset = self.make_dataset(data)
if data_val is not None:
_, dataset_val = self.make_dataset(data_val)
else:
dataset_val = None
self._timer(stop="make dataset", start="make dataloader")
train_loader, val_loader = self.make_dataloaders(dataset, dataset_val, validation_split, batch_size)
self._timer(stop="make dataloader", start="setup optimizer")
logger.debug("Setting up optimizer")
optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs
opt = optimizer(self.model.parameters(), lr=initial_lr, **optimizer_kwargs)
early_stopping = early_stopping and (validation_split is not None) and (epochs > 1)
best_loss, best_model, best_epoch = None, None, None
if early_stopping and early_stopping_patience is None:
logger.debug("Using early stopping with infinite patience")
elif early_stopping:
logger.debug("Using early stopping with patience %s", early_stopping_patience)
else:
logger.debug("No early stopping")
self._timer(stop="setup optimizer", start="initialize training")
n_losses = len(loss_functions)
loss_weights = [1.0] * n_losses if loss_weights is None else loss_weights
# Verbosity
if verbose == "all": # Print output after every epoch
n_epochs_verbose = 1
elif verbose == "many": # Print output after 2%, 4%, ..., 100% progress
n_epochs_verbose = max(int(round(epochs / 50, 0)), 1)
elif verbose == "some": # Print output after 10%, 20%, ..., 100% progress
n_epochs_verbose = max(int(round(epochs / 20, 0)), 1)
elif verbose == "few": # Print output after 20%, 40%, ..., 100% progress
n_epochs_verbose = max(int(round(epochs / 5, 0)), 1)
elif verbose == "none": # Never print output
n_epochs_verbose = epochs + 2
else:
            raise ValueError("Unknown value %s for keyword verbose" % verbose)
logger.debug("Will print training progress every %s epochs", n_epochs_verbose)
logger.debug("Beginning main training loop")
losses_train, losses_val = [], []
self._timer(stop="initialize training")
# Loop over epochs
for i_epoch in range(epochs):
logger.debug("Training epoch %s / %s", i_epoch + 1, epochs)
self._timer(start="set lr")
lr = self.calculate_lr(i_epoch, epochs, initial_lr, final_lr)
self.set_lr(opt, lr)
logger.debug("Learning rate: %s", lr)
self._timer(stop="set lr")
loss_val = None
try:
loss_train, loss_val, loss_contributions_train, loss_contributions_val = self.epoch(
i_epoch, data_labels, train_loader, val_loader, opt, loss_functions, loss_weights, clip_gradient
)
losses_train.append(loss_train)
losses_val.append(loss_val)
except NanException:
logger.info("Ending training during epoch %s because NaNs appeared", i_epoch + 1)
break
self._timer(start="early stopping")
if early_stopping:
try:
best_loss, best_model, best_epoch = self.check_early_stopping(
best_loss, best_model, best_epoch, loss_val, i_epoch, early_stopping_patience
)
except EarlyStoppingException:
logger.info("Early stopping: ending training after %s epochs", i_epoch + 1)
break
self._timer(stop="early stopping", start="report epoch")
verbose_epoch = (i_epoch + 1) % n_epochs_verbose == 0
self.report_epoch(
i_epoch,
loss_labels,
loss_train,
loss_val,
loss_contributions_train,
loss_contributions_val,
verbose=verbose_epoch,
)
self._timer(stop="report epoch")
self._timer(start="early stopping")
if early_stopping and len(losses_val) > 0:
self.wrap_up_early_stopping(best_model, loss_val, best_loss, best_epoch)
self._timer(stop="early stopping")
logger.debug("Training finished")
self._timer(stop="ALL")
self._report_timer()
return np.array(losses_train), np.array(losses_val)
@staticmethod
def report_data(data):
logger.debug("Training data:")
for key, value in six.iteritems(data):
if value is None:
logger.debug(" %s: -", key)
else:
logger.debug(
" %s: shape %s, first %s, mean %s, min %s, max %s",
key,
value.shape,
value[0],
np.mean(value, axis=0),
np.min(value, axis=0),
np.max(value, axis=0),
)
@staticmethod
def check_data(data):
pass
def make_dataset(self, data):
data_arrays = []
data_labels = []
for key, value in six.iteritems(data):
data_labels.append(key)
data_arrays.append(value)
dataset = NumpyDataset(*data_arrays, dtype=self.dtype)
return data_labels, dataset
def make_dataloaders(self, dataset, dataset_val, validation_split, batch_size):
if dataset_val is None and (validation_split is None or validation_split <= 0.0):
train_loader = DataLoader(
dataset, batch_size=batch_size, shuffle=True, pin_memory=self.run_on_gpu, num_workers=self.n_workers
)
val_loader = None
elif dataset_val is not None:
train_loader = DataLoader(
dataset, batch_size=batch_size, shuffle=True, pin_memory=self.run_on_gpu, num_workers=self.n_workers
)
val_loader = DataLoader(
dataset_val, batch_size=batch_size, shuffle=True, pin_memory=self.run_on_gpu, num_workers=self.n_workers
)
else:
assert 0.0 < validation_split < 1.0, "Wrong validation split: {}".format(validation_split)
n_samples = len(dataset)
indices = list(range(n_samples))
split = int(np.floor(validation_split * n_samples))
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
val_sampler = SubsetRandomSampler(valid_idx)
train_loader = DataLoader(
dataset,
sampler=train_sampler,
batch_size=batch_size,
pin_memory=self.run_on_gpu,
num_workers=self.n_workers,
)
val_loader = DataLoader(
dataset,
sampler=val_sampler,
batch_size=batch_size,
pin_memory=self.run_on_gpu,
num_workers=self.n_workers,
)
return train_loader, val_loader
@staticmethod
def calculate_lr(i_epoch, n_epochs, initial_lr, final_lr):
if n_epochs == 1:
return initial_lr
return initial_lr * (final_lr / initial_lr) ** float(i_epoch / (n_epochs - 1.0))
@staticmethod
def set_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def epoch(
self,
i_epoch,
data_labels,
train_loader,
val_loader,
optimizer,
loss_functions,
loss_weights,
clip_gradient=None,
):
n_losses = len(loss_functions)
self.model.train()
loss_contributions_train = np.zeros(n_losses)
loss_train = 0.0
self._timer(start="load training batch")
for i_batch, batch_data in enumerate(train_loader):
batch_data = OrderedDict(list(zip(data_labels, batch_data)))
self._timer(stop="load training batch")
batch_loss, batch_loss_contributions = self.batch_train(
batch_data, loss_functions, loss_weights, optimizer, clip_gradient
)
loss_train += batch_loss
for i, batch_loss_contribution in enumerate(batch_loss_contributions):
loss_contributions_train[i] += batch_loss_contribution
self.report_batch(i_epoch, i_batch, batch_loss)
self._timer(start="load training batch")
self._timer(stop="load training batch")
loss_contributions_train /= len(train_loader)
loss_train /= len(train_loader)
if val_loader is not None:
self.model.eval()
loss_contributions_val = np.zeros(n_losses)
loss_val = 0.0
self._timer(start="load validation batch")
for i_batch, batch_data in enumerate(val_loader):
batch_data = OrderedDict(list(zip(data_labels, batch_data)))
self._timer(stop="load validation batch")
batch_loss, batch_loss_contributions = self.batch_val(batch_data, loss_functions, loss_weights)
loss_val += batch_loss
for i, batch_loss_contribution in enumerate(batch_loss_contributions):
loss_contributions_val[i] += batch_loss_contribution
self._timer(start="load validation batch")
self._timer(stop="load validation batch")
loss_contributions_val /= len(val_loader)
loss_val /= len(val_loader)
else:
loss_contributions_val = None
loss_val = None
return loss_train, loss_val, loss_contributions_train, loss_contributions_val
def batch_train(self, batch_data, loss_functions, loss_weights, optimizer, clip_gradient=None):
self._timer(start="training forward pass")
loss_contributions = self.forward_pass(batch_data, loss_functions)
self._timer(stop="training forward pass", start="training sum losses")
loss = self.sum_losses(loss_contributions, loss_weights)
self._timer(stop="training sum losses", start="optimizer step")
self.optimizer_step(optimizer, loss, clip_gradient)
self._timer(stop="optimizer step", start="training sum losses")
loss = loss.item()
loss_contributions = [contrib.item() for contrib in loss_contributions]
self._timer(stop="training sum losses")
return loss, loss_contributions
def batch_val(self, batch_data, loss_functions, loss_weights):
self._timer(start="validation forward pass")
loss_contributions = self.forward_pass(batch_data, loss_functions)
self._timer(stop="validation forward pass", start="validation sum losses")
loss = self.sum_losses(loss_contributions, loss_weights)
loss = loss.item()
loss_contributions = [contrib.item() for contrib in loss_contributions]
self._timer(stop="validation sum losses")
return loss, loss_contributions
def forward_pass(self, batch_data, loss_functions):
"""
Forward pass of the model. Needs to be implemented by any subclass.
Parameters
----------
batch_data : OrderedDict with str keys and Tensor values
The data of the minibatch.
loss_functions : list of function
Loss functions.
Returns
-------
losses : list of Tensor
            Losses as scalar PyTorch tensors.
"""
raise NotImplementedError
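    # Concrete implementations are provided by the subclasses below, e.g.
    # SingleParameterizedRatioTrainer.forward_pass and FlowTrainer.forward_pass.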
@staticmethod
def sum_losses(contributions, weights):
loss = weights[0] * contributions[0]
for _w, _l in zip(weights[1:], contributions[1:]):
loss = loss + _w * _l
return loss
def optimizer_step(self, optimizer, loss, clip_gradient):
self._timer(start="opt: zero grad")
optimizer.zero_grad()
self._timer(stop="opt: zero grad", start="opt: backward")
loss.backward()
self._timer(start="opt: clip grad norm", stop="opt: backward")
if clip_gradient is not None:
clip_grad_norm_(self.model.parameters(), clip_gradient)
self._timer(stop="opt: clip grad norm", start="opt: step")
optimizer.step()
self._timer(stop="opt: step")
def check_early_stopping(self, best_loss, best_model, best_epoch, loss, i_epoch, early_stopping_patience=None):
if best_loss is None or loss < best_loss:
best_loss = loss
best_model = self.model.state_dict()
best_epoch = i_epoch
if early_stopping_patience is not None and i_epoch - best_epoch > early_stopping_patience >= 0:
raise EarlyStoppingException
if loss is None or not np.isfinite(loss):
raise EarlyStoppingException
return best_loss, best_model, best_epoch
@staticmethod
def report_batch(i_epoch, i_batch, loss_train):
if i_batch in [0, 1, 10, 100, 1000]:
logger.debug(" Epoch {:>3d}, batch {:>3d}: loss {:>8.5f}".format(i_epoch + 1, i_batch + 1, loss_train))
@staticmethod
def report_epoch(
i_epoch, loss_labels, loss_train, loss_val, loss_contributions_train, loss_contributions_val, verbose=False
):
logging_fn = logger.info if verbose else logger.debug
def contribution_summary(labels, contributions):
summary = ""
for i, (label, value) in enumerate(zip(labels, contributions)):
if i > 0:
summary += ", "
summary += "{}: {:>6.3f}".format(label, value)
return summary
train_report = " Epoch {:>3d}: train loss {:>8.5f} ({})".format(
i_epoch + 1, loss_train, contribution_summary(loss_labels, loss_contributions_train)
)
logging_fn(train_report)
if loss_val is not None:
val_report = " val. loss {:>8.5f} ({})".format(
loss_val, contribution_summary(loss_labels, loss_contributions_val)
)
logging_fn(val_report)
    def wrap_up_early_stopping(self, best_model, current_loss, best_loss, best_epoch):
        if best_loss is None or not np.isfinite(best_loss):
            logger.warning("Best loss is None or not finite, cannot wrap up early stopping")
        elif current_loss is None or not np.isfinite(current_loss) or best_loss < current_loss:
logger.info(
"Early stopping after epoch %s, with loss %8.5f compared to final loss %8.5f",
best_epoch + 1,
best_loss,
                current_loss,
)
self.model.load_state_dict(best_model)
else:
logger.info("Early stopping did not improve performance")
@staticmethod
def _check_for_nans(label, *tensors):
for tensor in tensors:
if tensor is None:
continue
if torch.isnan(tensor).any():
logger.warning("%s contains NaNs, aborting training!", label)
raise NanException
def _init_timer(self):
self.timer = OrderedDict()
self.time_started = OrderedDict()
def _timer(self, start=None, stop=None):
if start is not None:
self.time_started[start] = time.time()
if stop is not None:
if stop not in list(self.time_started.keys()):
logger.warning("Timer for task %s has been stopped without being started before", stop)
return
dt = time.time() - self.time_started[stop]
del self.time_started[stop]
if stop in list(self.timer.keys()):
self.timer[stop] += dt
else:
self.timer[stop] = dt
def _report_timer(self):
logger.info("Training time spend on:")
for key, value in six.iteritems(self.timer):
logger.info(" {:>32s}: {:6.2f}h".format(key, value / 3600.0))
class SingleParameterizedRatioTrainer(Trainer):
def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):
super(SingleParameterizedRatioTrainer, self).__init__(model, run_on_gpu, double_precision, n_workers)
self.calculate_model_score = True
def check_data(self, data):
data_keys = list(data.keys())
if "x" not in data_keys or "theta" not in data_keys or "y" not in data_keys:
raise ValueError("Missing required information 'x', 'theta', or 'y' in training data!")
for key in data_keys:
if key not in ["x", "theta", "y", "r_xz", "t_xz"]:
logger.warning("Unknown key %s in training data! Ignoring it.", key)
self.calculate_model_score = "t_xz" in data_keys
if self.calculate_model_score:
logger.debug("Model score will be calculated")
else:
logger.debug("Model score will not be calculated")
def forward_pass(self, batch_data, loss_functions):
self._timer(start="fwd: move data")
theta = batch_data["theta"].to(self.device, self.dtype, non_blocking=True)
x = batch_data["x"].to(self.device, self.dtype, non_blocking=True)
y = batch_data["y"].to(self.device, self.dtype, non_blocking=True)
try:
r_xz = batch_data["r_xz"].to(self.device, self.dtype, non_blocking=True)
except KeyError:
r_xz = None
try:
t_xz = batch_data["t_xz"].to(self.device, self.dtype, non_blocking=True)
except KeyError:
t_xz = None
self._timer(stop="fwd: move data", start="fwd: check for nans")
self._check_for_nans("Training data", theta, x, y)
self._check_for_nans("Augmented training data", r_xz, t_xz)
self._timer(start="fwd: model.forward", stop="fwd: check for nans")
if self.calculate_model_score:
theta.requires_grad = True
s_hat, log_r_hat, t_hat = self.model(theta, x, track_score=self.calculate_model_score, return_grad_x=False)
self._timer(stop="fwd: model.forward", start="fwd: check for nans")
self._check_for_nans("Model output", log_r_hat, s_hat)
self._check_for_nans("Model score", t_hat)
self._timer(start="fwd: calculate losses", stop="fwd: check for nans")
losses = [loss_function(s_hat, log_r_hat, t_hat, None, y, r_xz, t_xz, None) for loss_function in loss_functions]
self._timer(stop="fwd: calculate losses", start="fwd: check for nans")
self._check_for_nans("Loss", *losses)
self._timer(stop="fwd: check for nans")
return losses
class DoubleParameterizedRatioTrainer(Trainer):
def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):
super(DoubleParameterizedRatioTrainer, self).__init__(model, run_on_gpu, double_precision, n_workers)
self.calculate_model_score = True
def check_data(self, data):
data_keys = list(data.keys())
if "x" not in data_keys or "theta0" not in data_keys or "theta1" not in data_keys or "y" not in data_keys:
raise ValueError("Missing required information 'x', 'theta0', 'theta1', or 'y' in training data!")
for key in data_keys:
if key not in ["x", "theta0", "theta1", "y", "r_xz", "t_xz0", "t_xz1"]:
logger.warning("Unknown key %s in training data! Ignoring it.", key)
self.calculate_model_score = "t_xz0" in data_keys or "t_xz1" in data_keys
if self.calculate_model_score:
logger.debug("Model score will be calculated")
else:
logger.debug("Model score will not be calculated")
def forward_pass(self, batch_data, loss_functions):
self._timer(start="fwd: move data")
theta0 = batch_data["theta0"].to(self.device, self.dtype, non_blocking=True)
theta1 = batch_data["theta1"].to(self.device, self.dtype, non_blocking=True)
x = batch_data["x"].to(self.device, self.dtype, non_blocking=True)
y = batch_data["y"].to(self.device, self.dtype, non_blocking=True)
try:
r_xz = batch_data["r_xz"].to(self.device, self.dtype, non_blocking=True)
except KeyError:
r_xz = None
try:
t_xz0 = batch_data["t_xz0"].to(self.device, self.dtype, non_blocking=True)
except KeyError:
t_xz0 = None
try:
t_xz1 = batch_data["t_xz1"].to(self.device, self.dtype, non_blocking=True)
except KeyError:
t_xz1 = None
self._timer(stop="fwd: move data", start="fwd: check for nans")
self._check_for_nans("Training data", theta0, theta1, x, y)
self._check_for_nans("Augmented training data", r_xz, t_xz0, t_xz1)
self._timer(start="fwd: model.forward", stop="fwd: check for nans")
if self.calculate_model_score:
theta0.requires_grad = True
theta1.requires_grad = True
s_hat, log_r_hat, t_hat0, t_hat1 = self.model(
theta0, theta1, x, track_score=self.calculate_model_score, return_grad_x=False
)
self._timer(stop="fwd: model.forward", start="fwd: check for nans")
self._check_for_nans("Model output", s_hat, log_r_hat, t_hat0, t_hat1)
self._timer(start="fwd: calculate losses", stop="fwd: check for nans")
losses = [
loss_function(s_hat, log_r_hat, t_hat0, t_hat1, y, r_xz, t_xz0, t_xz1) for loss_function in loss_functions
]
self._timer(stop="fwd: calculate losses", start="fwd: check for nans")
self._check_for_nans("Loss", *losses)
self._timer(stop="fwd: check for nans")
return losses
class LocalScoreTrainer(Trainer):
def check_data(self, data):
data_keys = list(data.keys())
if "x" not in data_keys or "t_xz" not in data_keys:
raise ValueError("Missing required information 'x' or 't_xz' in training data!")
for key in data_keys:
if key not in ["x", "t_xz"]:
logger.warning("Unknown key %s in training data! Ignoring it.", key)
def forward_pass(self, batch_data, loss_functions):
self._timer(start="fwd: move data")
x = batch_data["x"].to(self.device, self.dtype, non_blocking=True)
t_xz = batch_data["t_xz"].to(self.device, self.dtype, non_blocking=True)
self._timer(stop="fwd: move data", start="fwd: check for nans")
self._check_for_nans("Training data", x)
self._check_for_nans("Augmented training data", t_xz)
self._timer(start="fwd: model.forward", stop="fwd: check for nans")
t_hat = self.model(x)
self._timer(stop="fwd: model.forward", start="fwd: check for nans")
self._check_for_nans("Model output", t_hat)
self._timer(start="fwd: calculate losses", stop="fwd: check for nans")
losses = [loss_function(t_hat, t_xz) for loss_function in loss_functions]
self._timer(stop="fwd: calculate losses", start="fwd: check for nans")
self._check_for_nans("Loss", *losses)
self._timer(stop="fwd: check for nans")
return losses
class FlowTrainer(Trainer):
def __init__(self, model, run_on_gpu=True, double_precision=False, n_workers=8):
super(FlowTrainer, self).__init__(model, run_on_gpu, double_precision, n_workers)
self.calculate_model_score = True
def check_data(self, data):
data_keys = list(data.keys())
if "x" not in data_keys or "theta" not in data_keys:
raise ValueError("Missing required information 'x' or 'theta' in training data!")
for key in data_keys:
if key not in ["x", "theta", "t_xz"]:
logger.warning("Unknown key %s in training data! Ignoring it.", key)
self.calculate_model_score = "t_xz" in data_keys
if self.calculate_model_score:
logger.debug("Model score will be calculated")
else:
logger.debug("Model score will not be calculated")
def forward_pass(self, batch_data, loss_functions):
self._timer(start="fwd: move data")
x = batch_data["x"].to(self.device, self.dtype, non_blocking=True)
theta = batch_data["theta"].to(self.device, self.dtype, non_blocking=True)
try:
t_xz = batch_data["t_xz"].to(self.device, self.dtype, non_blocking=True)
except KeyError:
t_xz = None
self._timer(stop="fwd: move data", start="fwd: check for nans")
self._check_for_nans("Training data", theta, x)
self._check_for_nans("Augmented training data", t_xz)
self._timer(start="fwd: model.forward", stop="fwd: check for nans")
if self.calculate_model_score:
theta.requires_grad = True
_, log_likelihood, t_hat = self.model.log_likelihood_and_score(theta, x)
else:
_, log_likelihood = self.model.log_likelihood(theta, x)
t_hat = None
self._timer(stop="fwd: model.forward", start="fwd: check for nans")
self._check_for_nans("Model output", log_likelihood, t_hat)
self._timer(start="fwd: calculate losses", stop="fwd: check for nans")
losses = [loss_function(log_likelihood, t_hat, t_xz) for loss_function in loss_functions]
self._timer(stop="fwd: calculate losses", start="fwd: check for nans")
self._check_for_nans("Loss", *losses)
self._timer(stop="fwd: check for nans")
return losses
| [
"torch.device",
"torch.isnan",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SubsetRandomSampler"
] | 1.0.0 | siyuchen95/madminer | dfcbd7ee26c47dd294610c195fafce15f74c10eb |
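The sampler-based split in make_dataloaders above is a common PyTorch pattern; the following minimal, self-contained sketch uses a toy TensorDataset and made-up sizes (not taken from the repository) to illustrate it:

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.sampler import SubsetRandomSampler

dataset = TensorDataset(torch.randn(100, 3), torch.randn(100, 1))  # toy features / targets
validation_split = 0.25
indices = list(range(len(dataset)))
split = int(np.floor(validation_split * len(dataset)))
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_loader = DataLoader(dataset, sampler=SubsetRandomSampler(train_idx), batch_size=16)
val_loader = DataLoader(dataset, sampler=SubsetRandomSampler(valid_idx), batch_size=16)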
1.10 | import numpy as np, sys, os, random, pdb, json, uuid, time, argparse
from pprint import pprint
import logging, logging.config
from collections import defaultdict as ddict
# from ordered_set import OrderedSet
# PyTorch related imports
import torch
from torch.nn import functional as F
from torch.nn.init import xavier_normal_
from torch.utils.data import DataLoader
from torch.nn import Parameter
# from torch_scatter import scatter_add
from .util_scatter import scatter_add
try:
from torch import irfft
from torch import rfft
except ImportError:
from torch.fft import irfft2
from torch.fft import rfft2
def rfft(x, d):
t = rfft2(x, dim=(-d))
return torch.stack((t.real, t.imag), -1)
def irfft(x, d, signal_sizes):
return irfft2(torch.complex(x[:, :, 0], x[:, :, 1]), s=signal_sizes, dim=(-d))
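# Compatibility shim: torch.rfft/torch.irfft were removed from newer PyTorch releases, so the
# fallback above rebuilds them on top of torch.fft while keeping the old layout in which the
# real and imaginary parts are stacked along the last dimension (as expected by cconv/ccorr below).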
np.set_printoptions(precision=4)
def set_gpu(gpus):
"""
Sets the GPU to be used for the run
Parameters
----------
    gpus: Comma-separated string of GPU ids to be made visible to the run
Returns
-------
"""
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = gpus
def get_logger(name, log_dir, config_dir):
"""
Creates a logger object
Parameters
----------
name: Name of the logger file
log_dir: Directory where logger file needs to be stored
config_dir: Directory from where log_config.json needs to be read
Returns
-------
A logger object which writes to both file and stdout
"""
config_dict = json.load(open(config_dir + 'log_config.json'))
config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')
logging.config.dictConfig(config_dict)
logger = logging.getLogger(name)
std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logging.Formatter(std_out_format))
logger.addHandler(consoleHandler)
return logger
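# Example (assumes a log_config.json with a 'file_handler' entry under config_dir):
# logger = get_logger("run1", "./logs/", "./config/")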
def get_combined_results(left_results, right_results):
results = {}
count = float(left_results['count'])
results['left_mr'] = round(left_results['mr'] / count, 5)
results['left_mrr'] = round(left_results['mrr'] / count, 5)
results['right_mr'] = round(right_results['mr'] / count, 5)
results['right_mrr'] = round(right_results['mrr'] / count, 5)
results['mr'] = round((left_results['mr'] + right_results['mr']) / (2 * count), 5)
results['mrr'] = round((left_results['mrr'] + right_results['mrr']) / (2 * count), 5)
for k in range(10):
results['left_hits@{}'.format(k + 1)] = round(left_results['hits@{}'.format(k + 1)] / count, 5)
results['right_hits@{}'.format(k + 1)] = round(right_results['hits@{}'.format(k + 1)] / count, 5)
results['hits@{}'.format(k + 1)] = round(
(left_results['hits@{}'.format(k + 1)] + right_results['hits@{}'.format(k + 1)]) / (2 * count), 5)
return results
def get_param(shape):
    param = Parameter(torch.Tensor(*shape))
xavier_normal_(param.data)
return param
def com_mult(a, b):
r1, i1 = a[..., 0], a[..., 1]
r2, i2 = b[..., 0], b[..., 1]
return torch.stack([r1 * r2 - i1 * i2, r1 * i2 + i1 * r2], dim=-1)
def conj(a):
a[..., 1] = -a[..., 1]
return a
def cconv(a, b):
return irfft(com_mult(rfft(a, 1), rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))
def ccorr(a, b):
return irfft(com_mult(conj(rfft(a, 1)), rfft(b, 1)), 1, signal_sizes=(a.shape[-1],))
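# cconv implements circular convolution and ccorr circular correlation: taking the complex
# conjugate in the Fourier domain turns the convolution into a correlation. ccorr is the
# HolE-style composition commonly used to combine entity and relation embeddings.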
def construct_adj(train_dataset, relation_dict_len):
edge_index, edge_type = [], []
if train_dataset.data.shape[1] == 3: # score_based
for sub, rel, obj in train_dataset.data:
edge_index.append((sub, obj))
edge_type.append(rel)
for sub, rel, obj in train_dataset.data:
edge_index.append((obj, sub))
edge_type.append(rel + relation_dict_len)
else: # classification-based
label = train_dataset.label_data
for j,(sub, rel) in enumerate(train_dataset.data):
for elem in torch.nonzero(label[j]):
e2_idx = elem.item()
edge_index.append((sub,e2_idx))
edge_type.append(rel)
for j,(sub, rel) in enumerate(train_dataset.data):
for elem in torch.nonzero(label[j]):
e2_idx = elem.item()
edge_index.append((e2_idx,sub))
edge_type.append(rel + relation_dict_len)
return edge_index,edge_type | [
"torch.rfft",
"torch.nonzero",
"torch.stack",
"torch.complex",
"torch.nn.init.xavier_normal_",
"torch.Tensor",
"torch.fft.rfft2"
] | 1.10.1 | jinzhuoran/CogKGE | b0e819a1d34cf61a7d70c33808da3377b73c8fd6 |
1.10 | import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Linear(in_features, out_features, bias=False)
# self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.weight, gain=1.414)
# self.a = nn.Parameter(torch.zeros(size=(2*out_features, 1)))
self.a1 = nn.Parameter(torch.zeros(size=(out_features, 1)))
self.a2 = nn.Parameter(torch.zeros(size=(out_features, 1)))
nn.init.xavier_uniform_(self.a1.data, gain=1.414)
nn.init.xavier_uniform_(self.a2.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = self.W(input)
# [batch_size, N, out_features]
batch_size, N, _ = h.size()
middle_result1 = torch.matmul(h, self.a1).expand(-1, -1, N)
middle_result2 = torch.matmul(h, self.a2).expand(-1, -1, N).transpose(1, 2)
e = self.leakyrelu(middle_result1 + middle_result2)
attention = e.masked_fill(adj == 0, -1e9)
attention = F.softmax(attention, dim=2)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
class GAT(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, layer):
super(GAT, self).__init__()
self.dropout = dropout
self.layer = layer
if self.layer == 1:
self.attentions = [GraphAttentionLayer(nfeat, nclass, dropout=dropout, alpha=alpha, concat=True) for _ in
range(nheads)]
else:
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in
range(nheads)]
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
def forward(self, vector, adj):
x = vector.unsqueeze(0)
x = F.dropout(x, self.dropout, training=self.training)
if self.layer == 1:
x = torch.stack([att(x, adj) for att in self.attentions], dim=2)
x = x.sum(2)
x = F.dropout(x, self.dropout, training=self.training)
return F.log_softmax(x, dim=2)
else:
x = torch.cat([att(x, adj) for att in self.attentions], dim=2)
x = F.dropout(x, self.dropout, training=self.training)
x = F.elu(self.out_att(x, adj))
return F.log_softmax(x, dim=2)
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.functional.dropout",
"torch.nn.LeakyReLU",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.elu",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax",
"torch.matmul"
] | 1.10.1 | jinzhuoran/CogKGE | 70d851d6489600c1e90eb25b0388a3ceba2f078c |
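A minimal usage sketch for the GAT module defined above; the node count, feature sizes, and the dense all-ones adjacency are illustrative assumptions, not values from the repository:

import torch

num_nodes, nfeat = 5, 16
model = GAT(nfeat=nfeat, nhid=8, nclass=4, dropout=0.5, alpha=0.2, nheads=2, layer=2)
features = torch.rand(num_nodes, nfeat)    # one feature vector per node
adj = torch.ones(num_nodes, num_nodes)     # dense adjacency; zero entries would mask attention
log_probs = model(features, adj)           # shape (1, num_nodes, nclass), log-probabilities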
1.10 | import sys
import torch
from pathlib import Path
from torch.utils.data import RandomSampler
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0].parents[0].parents[0] # CogKGE root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add CogKGE root directory to PATH
from cogkge import *
device = init_cogkge(device_id="1", seed=0)
loader = EVENTKG240KLoader(dataset_path="../../dataset", download=True)
train_data, valid_data, test_data = loader.load_all_data()
node_lut, relation_lut, time_lut = loader.load_all_lut()
processor = EVENTKG240KProcessor(node_lut, relation_lut, time_lut, reprocess=True,mode="normal")
train_dataset = processor.process(train_data)
valid_dataset = processor.process(valid_data)
test_dataset = processor.process(test_data)
node_lut, relation_lut, time_lut = processor.process_lut()
train_sampler = RandomSampler(train_dataset)
valid_sampler = RandomSampler(valid_dataset)
test_sampler = RandomSampler(test_dataset)
model = TransH(entity_dict_len=len(node_lut),
relation_dict_len=len(relation_lut),
embedding_dim=50,
p_norm=1,
penalty_weight=0.1)
loss = MarginLoss(margin=1.0)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0)
metric = Link_Prediction(node_lut=node_lut,
relation_lut=relation_lut,
link_prediction_raw=True,
link_prediction_filt=False,
batch_size=1000000,
reverse=False)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', patience=3, threshold_mode='abs', threshold=5,
factor=0.5, min_lr=1e-9, verbose=True
)
negative_sampler = UnifNegativeSampler(triples=train_dataset,
entity_dict_len=len(node_lut),
relation_dict_len=len(relation_lut),
node_lut=node_lut)
trainer = Trainer(
train_dataset=train_dataset,
valid_dataset=valid_dataset,
train_sampler=train_sampler,
valid_sampler=valid_sampler,
test_dataset=test_dataset,
test_sampler=test_sampler,
model=model,
loss=loss,
optimizer=optimizer,
negative_sampler=negative_sampler,
device=device,
output_path="../../dataset",
lookuptable_E=node_lut,
lookuptable_R=relation_lut,
metric=metric,
lr_scheduler=lr_scheduler,
trainer_batch_size=100000,
total_epoch=3000,
apex=True,
dataloaderX=True,
num_workers=1,
pin_memory=True,
use_tensorboard_epoch=100,
use_matplotlib_epoch=100,
use_savemodel_epoch=100,
use_metric_epoch=100
)
trainer.train()
| [
"torch.utils.data.RandomSampler",
"torch.optim.lr_scheduler.ReduceLROnPlateau"
] | 1.10.1 | jinzhuoran/CogKGE | b0e819a1d34cf61a7d70c33808da3377b73c8fd6 |
1.9 | import pytest
import torch
from ludwig.encoders import text_encoders
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_albert_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
albert_encoder = text_encoders.ALBERTEncoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
    inputs = torch.rand((2, max_sequence_length)).type(albert_encoder.input_dtype)
outputs = albert_encoder(inputs)
assert outputs["encoder_output"].shape[1:] == albert_encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "cls_pooled", "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_bert_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
bert = text_encoders.BERTEncoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(bert.input_dtype)
outputs = bert(inputs)
assert outputs["encoder_output"].shape[1:] == bert.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", ["last", "sum", "mean"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_xlm_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
xlm_encoder = text_encoders.XLMEncoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(xlm_encoder.input_dtype)
outputs = xlm_encoder(inputs)
assert outputs["encoder_output"].shape[1:] == xlm_encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_gpt_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
gpt_encoder = text_encoders.GPTEncoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(gpt_encoder.input_dtype)
outputs = gpt_encoder(inputs)
assert outputs["encoder_output"].shape[1:] == gpt_encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", ["cls_pooled", "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_roberta_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
roberta_encoder = text_encoders.RoBERTaEncoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(roberta_encoder.input_dtype)
outputs = roberta_encoder(inputs)
assert outputs["encoder_output"].shape[1:] == roberta_encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [True, False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_gpt2_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
gpt_encoder = text_encoders.GPT2Encoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(gpt_encoder.input_dtype)
outputs = gpt_encoder(inputs)
assert outputs["encoder_output"].shape[1:] == gpt_encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_distil_bert(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
distil_bert_encoder = text_encoders.DistilBERTEncoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(distil_bert_encoder.input_dtype)
outputs = distil_bert_encoder(inputs)
assert outputs["encoder_output"].shape[1:] == distil_bert_encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_transfoxl_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
transfo = text_encoders.TransformerXLEncoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.randint(10, (2, max_sequence_length)).type(transfo.input_dtype)
outputs = transfo(inputs)
assert outputs["encoder_output"].shape[1:] == transfo.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_ctrl_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
encoder = text_encoders.CTRLEncoder(
max_sequence_length,
use_pretrained=use_pretrained,
reduce_output=reduce_output,
)
inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)
outputs = encoder(inputs)
assert outputs["encoder_output"].shape[1:] == encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "cls_pooled"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_camembert_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
encoder = text_encoders.CamemBERTEncoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)
outputs = encoder(inputs)
assert outputs["encoder_output"].shape[1:] == encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "cls_pooled"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_longformer_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
encoder = text_encoders.LongformerEncoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)
outputs = encoder(inputs)
assert outputs["encoder_output"].shape[1:] == encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_mt5_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
mt5_encoder = text_encoders.MT5Encoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(mt5_encoder.input_dtype)
outputs = mt5_encoder(inputs)
assert outputs["encoder_output"].shape[1:] == mt5_encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_xlmroberta_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
xlmroberta_encoder = text_encoders.XLMRoBERTaEncoder(
use_pretrained=use_pretrained,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(xlmroberta_encoder.input_dtype)
outputs = xlmroberta_encoder(inputs)
assert outputs["encoder_output"].shape[1:] == xlmroberta_encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "cls_pooled"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_longformer_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
encoder = text_encoders.LongformerEncoder(
use_pretrained=use_pretrained, reduce_output=reduce_output, max_sequence_length=max_sequence_length
)
inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)
outputs = encoder(inputs)
assert outputs["encoder_output"].shape[1:] == encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_electra_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
encoder = text_encoders.ELECTRAEncoder(
use_pretrained=use_pretrained, reduce_output=reduce_output, max_sequence_length=max_sequence_length
)
inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)
outputs = encoder(inputs)
assert outputs["encoder_output"].shape[1:] == encoder.output_shape
@pytest.mark.parametrize("pretrained_model_name_or_path", ["bert-base-uncased"])
@pytest.mark.parametrize("reduce_output", [None, "sum", "cls_pooled"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_auto_transformer_encoder(pretrained_model_name_or_path: str, reduce_output: str, max_sequence_length: int):
encoder = text_encoders.AutoTransformerEncoder(
pretrained_model_name_or_path=pretrained_model_name_or_path,
reduce_output=reduce_output,
max_sequence_length=max_sequence_length,
)
inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)
outputs = encoder(inputs)
assert outputs["encoder_output"].shape[1:] == encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_flaubert_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
encoder = text_encoders.FlauBERTEncoder(
use_pretrained=use_pretrained, reduce_output=reduce_output, max_sequence_length=max_sequence_length
)
inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)
outputs = encoder(inputs)
assert outputs["encoder_output"].shape[1:] == encoder.output_shape
@pytest.mark.parametrize("use_pretrained", [False])
@pytest.mark.parametrize("reduce_output", [None, "sum"])
@pytest.mark.parametrize("max_sequence_length", [20])
def test_t5_encoder(use_pretrained: bool, reduce_output: str, max_sequence_length: int):
encoder = text_encoders.T5Encoder(
use_pretrained=use_pretrained, reduce_output=reduce_output, max_sequence_length=max_sequence_length
)
inputs = torch.rand((2, max_sequence_length)).type(encoder.input_dtype)
outputs = encoder(inputs)
assert outputs["encoder_output"].shape[1:] == encoder.output_shape
| [
"torch.rand",
"torch.randint"
] | 1.9.0 | jimthompson5802/ludwig | 8a369328a3f839d9cdb3710be315952c7891d7c0 |
1.9 | import contextlib
import os
from typing import List
from unittest.mock import Mock, patch
import pytest
import torch
from ludwig.utils.torch_utils import (
_get_torch_init_params,
_set_torch_init_params,
initialize_pytorch,
sequence_length_2D,
sequence_length_3D,
)
@pytest.mark.parametrize("input_sequence", [[[0, 1, 1], [2, 0, 0], [3, 3, 3]]])
@pytest.mark.parametrize("expected_output", [[2, 1, 3]])
def test_sequence_length_2D(input_sequence: List[List[int]], expected_output: List[int]):
output_seq_length = sequence_length_2D(torch.tensor(input_sequence))
assert torch.equal(torch.tensor(expected_output), output_seq_length)
@pytest.mark.parametrize("input_sequence", [[[[-1, 0, 1], [1, -2, 0]], [[0, 0, 0], [3, 0, -2]]]])
@pytest.mark.parametrize("expected_output", [[2, 1]])
def test_sequence_length_3D(input_sequence: List[List[List[int]]], expected_output: List[int]):
input_sequence = torch.tensor(input_sequence, dtype=torch.int32)
expected_output = torch.tensor(expected_output, dtype=torch.int32)
output_seq_length = sequence_length_3D(input_sequence)
assert torch.equal(expected_output, output_seq_length)
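# As the expected outputs above illustrate, sequence_length_2D counts the non-zero (non-padding)
# entries per row, while sequence_length_3D counts the timesteps whose feature vectors are not
# entirely zero.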
@contextlib.contextmanager
def clean_params():
prev = _get_torch_init_params()
try:
_set_torch_init_params(None)
if "CUDA_VISIBLE_DEVICES" in os.environ:
del os.environ["CUDA_VISIBLE_DEVICES"]
yield
finally:
_set_torch_init_params(prev)
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_only_once(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
with clean_params():
# During first time initialization, set pytorch parallelism
initialize_pytorch(allow_parallel_threads=False)
mock_torch.set_num_threads.assert_called_once()
mock_torch.set_num_interop_threads.assert_called_once()
# Reset call counts on all threading calls
mock_torch.reset_mock()
# In the second call to initialization, avoid calling these methods again, as pytorch
# will raise an exception
initialize_pytorch(allow_parallel_threads=False)
mock_torch.set_num_threads.assert_not_called()
mock_torch.set_num_interop_threads.assert_not_called()
# No GPUs were specified, so this should not have been called even once
mock_torch.cuda.memory.set_per_process_memory_fraction.assert_not_called()
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_gpu_list(mock_torch):
# For test purposes, these devices can be anything, we just need to be able to uniquely
# identify them.
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
with clean_params():
initialize_pytorch(gpus=[1, 2])
assert os.environ["CUDA_VISIBLE_DEVICES"] == "1,2"
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_gpu_string(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
with clean_params():
initialize_pytorch(gpus="1,2")
assert os.environ["CUDA_VISIBLE_DEVICES"] == "1,2"
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_gpu_int(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
with clean_params():
initialize_pytorch(gpus=1)
mock_torch.cuda.set_device.assert_called_with(1)
assert "CUDA_VISIBLE_DEVICES" not in os.environ
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_without_gpu(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
with clean_params():
initialize_pytorch(gpus=-1)
assert os.environ["CUDA_VISIBLE_DEVICES"] == ""
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_horovod(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
mock_hvd = Mock()
mock_hvd.local_rank.return_value = 1
mock_hvd.local_size.return_value = 4
with clean_params():
initialize_pytorch(horovod=mock_hvd)
mock_torch.cuda.set_device.assert_called_with(1)
assert "CUDA_VISIBLE_DEVICES" not in os.environ
@patch("ludwig.utils.torch_utils.warnings")
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_horovod_bad_local_rank(mock_torch, mock_warnings):
"""In this scenario, the local_size 5 is out of the bounds of the GPU indices."""
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
mock_hvd = Mock()
mock_hvd.local_rank.return_value = 1
mock_hvd.local_size.return_value = 5
with clean_params():
initialize_pytorch(horovod=mock_hvd)
assert os.environ["CUDA_VISIBLE_DEVICES"] == ""
mock_warnings.warn.assert_called()
@patch("ludwig.utils.torch_utils.torch")
def test_initialize_pytorch_with_horovod_explicit_gpus(mock_torch):
mock_torch.cuda.is_available.return_value = True
mock_torch.cuda.device_count.return_value = 4
mock_hvd = Mock()
mock_hvd.local_rank.return_value = 1
mock_hvd.local_size.return_value = 4
with clean_params():
initialize_pytorch(gpus="-1", horovod=mock_hvd)
assert os.environ["CUDA_VISIBLE_DEVICES"] == ""
| [
"torch.equal",
"torch.tensor"
] | 1.9.0 | jimthompson5802/ludwig | 8a369328a3f839d9cdb3710be315952c7891d7c0 |
1.9 | #! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import random
from typing import Dict
import numpy as np
import torch
from ludwig.constants import (
COLUMN,
FILL_WITH_CONST,
HIDDEN,
LOGITS,
LOSS,
MEAN_ABSOLUTE_ERROR,
MEAN_SQUARED_ERROR,
MISSING_VALUE_STRATEGY_OPTIONS,
NAME,
NUMERICAL,
PREDICTIONS,
PROC_COLUMN,
R2,
ROOT_MEAN_SQUARED_ERROR,
ROOT_MEAN_SQUARED_PERCENTAGE_ERROR,
SUM,
TIED,
TYPE,
)
from ludwig.features.base_feature import InputFeature, OutputFeature
from ludwig.utils import output_feature_utils
from ludwig.utils.misc_utils import get_from_registry, set_default_value, set_default_values
logger = logging.getLogger(__name__)
class ZScoreTransformer:
def __init__(self, mean: float = None, std: float = None, **kwargs: dict):
self.mu = mean
self.sigma = std
def transform(self, x: np.ndarray) -> np.ndarray:
return (x - self.mu) / self.sigma
def inverse_transform(self, x: np.ndarray) -> np.ndarray:
return x * self.sigma + self.mu
@staticmethod
def fit_transform_params(column: np.ndarray, backend: "Backend") -> dict: # noqa
compute = backend.df_engine.compute
return {
"mean": compute(column.astype(np.float32).mean()),
"std": compute(column.astype(np.float32).std()),
}
class MinMaxTransformer:
def __init__(self, min: float = None, max: float = None, **kwargs: dict):
self.min_value = min
self.max_value = max
self.range = None if min is None or max is None else max - min
def transform(self, x: np.ndarray) -> np.ndarray:
return (x - self.min_value) / self.range
def inverse_transform(self, x: np.ndarray) -> np.ndarray:
if self.range is None:
raise ValueError("Numeric transformer needs to be instantiated with " "min and max values.")
return x * self.range + self.min_value
@staticmethod
def fit_transform_params(column: np.ndarray, backend: "Backend") -> dict: # noqa
compute = backend.df_engine.compute
return {
"min": compute(column.astype(np.float32).min()),
"max": compute(column.astype(np.float32).max()),
}
class Log1pTransformer:
def __init__(self, **kwargs: dict):
pass
def transform(self, x: np.ndarray) -> np.ndarray:
if np.any(x <= 0):
raise ValueError(
"One or more values are non-positive. " "log1p normalization is defined only for positive values."
)
return np.log1p(x)
def inverse_transform(self, x: np.ndarray) -> np.ndarray:
return np.expm1(x)
@staticmethod
def fit_transform_params(column: np.ndarray, backend: "Backend") -> dict: # noqa
return {}
class IdentityTransformer:
def __init__(self, **kwargs):
pass
def transform(self, x: np.ndarray) -> np.ndarray:
return x
def inverse_transform(self, x: np.ndarray) -> np.ndarray:
return x
@staticmethod
def fit_transform_params(column: np.ndarray, backend: "Backend") -> dict: # noqa
return {}
numeric_transformation_registry = {
"minmax": MinMaxTransformer,
"zscore": ZScoreTransformer,
"log1p": Log1pTransformer,
None: IdentityTransformer,
}
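# Worked example with illustrative values: ZScoreTransformer(mean=2.0, std=4.0) maps
# np.array([2.0, 6.0]) to [0.0, 1.0] and inverse_transform maps it back; MinMaxTransformer and
# Log1pTransformer obey the same transform / inverse_transform round-trip contract.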
class NumericalFeatureMixin:
type = NUMERICAL
preprocessing_defaults = {
"missing_value_strategy": FILL_WITH_CONST,
"fill_value": 0,
"normalization": None,
}
preprocessing_schema = {
"missing_value_strategy": {
"type": "string",
"enum": MISSING_VALUE_STRATEGY_OPTIONS,
},
"fill_value": {"type": "number"},
"computed_fill_value": {"type": "number"},
"normalization": {
"type": ["string", "null"],
"enum": list(numeric_transformation_registry.keys()),
},
}
@staticmethod
def cast_column(column, backend):
return backend.df_engine.df_lib.to_numeric(column, errors="coerce").astype(np.float32)
@staticmethod
def get_feature_meta(column, preprocessing_parameters, backend):
numeric_transformer = get_from_registry(
preprocessing_parameters.get("normalization", None),
numeric_transformation_registry,
)
return numeric_transformer.fit_transform_params(column, backend)
@staticmethod
def add_feature_data(
feature,
input_df,
proc_df,
metadata,
preprocessing_parameters,
backend,
skip_save_processed_input,
):
proc_df[feature[PROC_COLUMN]] = input_df[feature[COLUMN]].astype(np.float32).values
# normalize data as required
numeric_transformer = get_from_registry(
preprocessing_parameters.get("normalization", None),
numeric_transformation_registry,
)(**metadata[feature[NAME]])
proc_df[feature[PROC_COLUMN]] = numeric_transformer.transform(proc_df[feature[PROC_COLUMN]])
return proc_df
class NumericalInputFeature(NumericalFeatureMixin, InputFeature):
encoder = "passthrough"
def __init__(self, feature, encoder_obj=None):
# Required for certain encoders, maybe pass into initialize_encoder
super().__init__(feature)
self.overwrite_defaults(feature)
feature["input_size"] = self.input_shape[-1]
if encoder_obj:
self.encoder_obj = encoder_obj
else:
self.encoder_obj = self.initialize_encoder(feature)
def forward(self, inputs):
assert isinstance(inputs, torch.Tensor)
assert inputs.dtype == torch.float32 or inputs.dtype == torch.float64
assert len(inputs.shape) == 1 or (len(inputs.shape) == 2 and inputs.shape[1] == 1)
if len(inputs.shape) == 1:
inputs = inputs[:, None]
inputs_encoded = self.encoder_obj(inputs)
return inputs_encoded
def create_sample_input(self):
# Used by get_model_inputs(), which is used for tracing-based torchscript generation.
return torch.Tensor([random.randint(1, 100), random.randint(1, 100)])
@property
def input_shape(self) -> torch.Size:
return torch.Size([1])
@property
def output_shape(self) -> torch.Size:
return torch.Size(self.encoder_obj.output_shape)
@staticmethod
def update_config_with_metadata(input_feature, feature_metadata, *args, **kwargs):
pass
@staticmethod
def populate_defaults(input_feature):
set_default_value(input_feature, TIED, None)
class NumericalOutputFeature(NumericalFeatureMixin, OutputFeature):
decoder = "regressor"
loss = {TYPE: MEAN_SQUARED_ERROR}
metric_functions = {
LOSS: None,
MEAN_SQUARED_ERROR: None,
MEAN_ABSOLUTE_ERROR: None,
ROOT_MEAN_SQUARED_ERROR: None,
ROOT_MEAN_SQUARED_PERCENTAGE_ERROR: None,
R2: None,
}
default_validation_metric = MEAN_SQUARED_ERROR
clip = None
def __init__(self, feature):
super().__init__(feature)
self.overwrite_defaults(feature)
feature["input_size"] = self.input_shape[-1]
self.decoder_obj = self.initialize_decoder(feature)
self._setup_loss()
self._setup_metrics()
def logits(self, inputs, **kwargs): # hidden
hidden = inputs[HIDDEN]
return self.decoder_obj(hidden)
def predictions(self, inputs: Dict[str, torch.Tensor], feature_name: str, **kwargs):
logits = output_feature_utils.get_output_feature_tensor(inputs, feature_name, LOGITS)
predictions = logits
if self.clip is not None:
if isinstance(self.clip, (list, tuple)) and len(self.clip) == 2:
predictions = torch.clamp(logits, self.clip[0], self.clip[1])
logger.debug(f" clipped_predictions: {predictions}")
else:
raise ValueError(
"The clip parameter of {} is {}. "
"It must be a list or a tuple of length 2.".format(self.feature_name, self.clip)
)
return {PREDICTIONS: predictions, LOGITS: logits}
def get_prediction_set(self):
return {PREDICTIONS, LOGITS}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.input_size])
@classmethod
def get_output_dtype(cls):
return torch.float32
@property
def output_shape(self) -> torch.Size:
return torch.Size([1])
@staticmethod
def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):
pass
@staticmethod
def calculate_overall_stats(predictions, targets, metadata):
# no overall stats, just return empty dictionary
return {}
def postprocess_predictions(
self,
predictions,
metadata,
output_directory,
backend,
):
predictions_col = f"{self.feature_name}_{PREDICTIONS}"
if predictions_col in predictions:
            # as needed, convert predictions back to the original value space
numeric_transformer = get_from_registry(
metadata["preprocessing"].get("normalization", None),
numeric_transformation_registry,
)(**metadata)
predictions[predictions_col] = backend.df_engine.map_objects(
predictions[predictions_col],
lambda pred: numeric_transformer.inverse_transform(pred),
)
return predictions
@staticmethod
def populate_defaults(output_feature):
set_default_value(output_feature, LOSS, {TYPE: "mean_squared_error", "weight": 1})
set_default_value(output_feature[LOSS], TYPE, "mean_squared_error")
set_default_value(output_feature[LOSS], "weight", 1)
set_default_values(
output_feature,
{
"clip": None,
"dependencies": [],
"reduce_input": SUM,
"reduce_dependencies": SUM,
},
)
| [
"torch.Size",
"torch.clamp"
] | 1.9.0 | jimthompson5802/ludwig | 8a369328a3f839d9cdb3710be315952c7891d7c0 |
0.4 | import argparse
import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import torch
import torch.optim as optim
import torchvision.utils as vutils
from swae.distributions import rand_cirlce2d, rand_ring2d, rand_uniform2d
from swae.models.mnist import MNISTAutoencoder
from swae.trainer import SWAEBatchTrainer
from torchvision import datasets, transforms
def main():
# train args
parser = argparse.ArgumentParser(description='Sliced Wasserstein Autoencoder PyTorch MNIST Example')
parser.add_argument('--datadir', default='/input/', help='path to dataset')
parser.add_argument('--outdir', default='/output/', help='directory to output images and model checkpoints')
parser.add_argument('--batch-size', type=int, default=500, metavar='N',
help='input batch size for training (default: 500)')
parser.add_argument('--epochs', type=int, default=30, metavar='N',
help='number of epochs to train (default: 30)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--alpha', type=float, default=0.9, metavar='A',
help='RMSprop alpha/rho (default: 0.9)')
parser.add_argument('--distribution', type=str, default='circle', metavar='DIST',
help='Latent Distribution (default: circle)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--num-workers', type=int, default=8, metavar='N',
help='number of dataloader workers if device is CPU (default: 8)')
parser.add_argument('--seed', type=int, default=7, metavar='S',
help='random seed (default: 7)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='number of batches to log training status (default: 10)')
args = parser.parse_args()
# create output directory
imagesdir = os.path.join(args.outdir, 'images')
chkptdir = os.path.join(args.outdir, 'models')
os.makedirs(args.datadir, exist_ok=True)
os.makedirs(imagesdir, exist_ok=True)
os.makedirs(chkptdir, exist_ok=True)
# determine device and device dep. args
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
dataloader_kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {'num_workers': args.num_workers, 'pin_memory': False}
# set random seed
torch.manual_seed(args.seed)
if use_cuda:
torch.cuda.manual_seed(args.seed)
# log args
print('batch size {}\nepochs {}\nRMSprop lr {} alpha {}\ndistribution {}\nusing device {}\nseed set to {}'.format(
args.batch_size, args.epochs, args.lr, args.alpha, args.distribution, device.type, args.seed
))
# build train and test set data loaders
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(args.datadir, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **dataloader_kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(args.datadir, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=64, shuffle=False, **dataloader_kwargs)
# create encoder and decoder
model = MNISTAutoencoder().to(device)
print(model)
# create optimizer
# matching default Keras args for RMSprop
optimizer = optim.RMSprop(model.parameters(), lr=args.lr, alpha=args.alpha)
# determine latent distribution
if args.distribution == 'circle':
distribution_fn = rand_cirlce2d
elif args.distribution == 'ring':
distribution_fn = rand_ring2d
else:
distribution_fn = rand_uniform2d
# create batch sliced_wasserstein autoencoder trainer
trainer = SWAEBatchTrainer(model, optimizer, distribution_fn, device=device)
# put networks in training mode
model.train()
# train networks for n epochs
print('training...')
for epoch in range(args.epochs):
if epoch > 10:
trainer.weight *= 1.1
# train autoencoder on train dataset
for batch_idx, (x, y) in enumerate(train_loader, start=0):
batch = trainer.train_on_batch(x)
if (batch_idx + 1) % args.log_interval == 0:
print('Train Epoch: {} ({:.2f}%) [{}/{}]\tLoss: {:.6f}'.format(
epoch + 1, float(epoch + 1) / (args.epochs) * 100.,
(batch_idx + 1), len(train_loader),
batch['loss'].item()))
# evaluate autoencoder on test dataset
test_encode, test_targets, test_loss = list(), list(), 0.0
with torch.no_grad():
for test_batch_idx, (x_test, y_test) in enumerate(test_loader, start=0):
test_evals = trainer.test_on_batch(x_test)
test_encode.append(test_evals['encode'].detach())
test_loss += test_evals['loss'].item()
test_targets.append(y_test)
test_encode, test_targets = torch.cat(test_encode).cpu().numpy(), torch.cat(test_targets).cpu().numpy()
test_loss /= len(test_loader)
print('Test Epoch: {} ({:.2f}%)\tLoss: {:.6f}'.format(
epoch + 1, float(epoch + 1) / (args.epochs) * 100.,
test_loss))
print('{{"metric": "loss", "value": {}}}'.format(test_loss))
# save model
torch.save(model.state_dict(), '{}/mnist_epoch_{}.pth'.format(chkptdir, epoch + 1))
# save encoded samples plot
plt.figure(figsize=(10, 10))
plt.scatter(test_encode[:, 0], -test_encode[:, 1], c=(10 * test_targets), cmap=plt.cm.Spectral)
plt.xlim([-1.5, 1.5])
plt.ylim([-1.5, 1.5])
plt.title('Test Latent Space\nLoss: {:.5f}'.format(test_loss))
plt.savefig('{}/test_latent_epoch_{}.png'.format(imagesdir, epoch + 1))
plt.close()
# save sample input and reconstruction
vutils.save_image(x, '{}/test_samples_epoch_{}.png'.format(imagesdir, epoch + 1))
vutils.save_image(batch['decode'].detach(),
'{}/test_reconstructions_epoch_{}.png'.format(imagesdir, epoch + 1),
normalize=True)
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.cat",
"torch.cuda.manual_seed",
"torch.no_grad",
"torch.manual_seed",
"torch.cuda.is_available"
] | 0.4.1 | eifuentes/swae-pytorch | 763f771c1d4860f71819af48d4f21a8a29a689d5 |
0.1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import platform
import random
from abc import ABCMeta, abstractmethod
from typing import ClassVar, Dict, List
import torch
import torch.multiprocessing as mp
from beanmachine.ppl.inference.monte_carlo_samples import MonteCarloSamples
from beanmachine.ppl.inference.utils import (
_verify_queries_and_observations,
VerboseLevel,
)
from beanmachine.ppl.legacy.world import World
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.model.utils import LogLevel
from torch import Tensor
from torch.multiprocessing import Queue
LOGGER = logging.getLogger("beanmachine")
class AbstractInference(object, metaclass=ABCMeta):
"""
Abstract inference object that all inference algorithms inherit from.
"""
world_: World
_rand_int_max: ClassVar[int] = 2**62
def __init__(self):
self.initial_world_ = World()
self.world_ = self.initial_world_
self.queries_ = []
self.observations_ = {}
@staticmethod
def set_seed(seed: int):
torch.manual_seed(seed)
random.seed(seed)
def initialize_world(
self,
initialize_from_prior: bool = False,
):
"""
Initializes the world variables with queries and observation calls.
:param initialize_from_prior: boolean to initialize samples from prior
approximation.
"""
self.world_ = self.initial_world_.copy()
self.world_.set_observations(self.observations_)
self.world_.set_initialize_from_prior(initialize_from_prior)
for node in self.observations_:
# makes the call for the observation node, which will run sample(node())
# that results in adding its corresponding Variable and its dependent
# Variable to the world
self.world_.call(node)
for node in self.queries_:
# makes the call for the query node, which will run sample(node())
# that results in adding its corresponding Variable and its dependent
# Variable to the world.
self.world_.call(node)
self.world_.accept_diff()
def reset(self):
"""
Resets world, mode and observation
"""
self.world_ = self.initial_world_.copy()
self.queries_ = []
self.observations_ = {}
class AbstractMCInference(AbstractInference, metaclass=ABCMeta):
"""
Abstract inference object for Monte Carlo inference.
"""
_observations_must_be_rv: bool = True
@staticmethod
def set_seed_for_chain(random_seed: int, chain: int):
AbstractInference.set_seed(random_seed + chain * 31)
@abstractmethod
def _infer(
self,
num_samples: int,
num_adaptive_samples: int = 0,
verbose: VerboseLevel = VerboseLevel.LOAD_BAR,
initialize_from_prior: bool = False,
) -> Dict[RVIdentifier, Tensor]:
"""
Abstract method to be implemented by classes that inherit from
AbstractInference.
"""
raise NotImplementedError("Inference algorithm must implement _infer.")
def _parallel_infer(
self,
queue: Queue,
chain: int,
num_samples: int,
random_seed: int,
num_adaptive_samples: int,
verbose: VerboseLevel,
):
try:
AbstractMCInference.set_seed_for_chain(random_seed, chain)
rv_dict = self._infer(num_samples, num_adaptive_samples, verbose)
string_dict = {str(rv): tensor.detach() for rv, tensor in rv_dict.items()}
queue.put((None, chain, string_dict))
except BaseException as x:
LOGGER.log(
                LogLevel.ERROR.value, "Error: Parallel inference chain failed."
)
queue.put((x, chain, {}))
def infer(
self,
queries: List[RVIdentifier],
observations: Dict[RVIdentifier, Tensor],
num_samples: int,
num_chains: int = 4,
run_in_parallel: bool = False,
num_adaptive_samples: int = 0,
verbose: VerboseLevel = VerboseLevel.LOAD_BAR,
initialize_from_prior: bool = False,
) -> MonteCarloSamples:
"""
Run inference algorithms and reset the world/mode at the end.
All tensors in `queries` and `observations` must be allocated on the
same `torch.device`. Inference algorithms will attempt to allocate
intermediate tensors on the same device.
:param queries: random variables to query
:param observations: observed random variables with their values
:param num_samples: number of samples excluding adaptation to collect.
:param num_chains: number of chains to run
:param num_adaptive_samples: number of steps to allow proposer adaptation.
:param verbose: Integer indicating how much output to print to stdio
:param initialize_from_prior: boolean to initialize samples from prior
:returns: view of data for chains and samples for query
"""
_verify_queries_and_observations(
queries, observations, self._observations_must_be_rv
)
random_seed = torch.randint(AbstractInference._rand_int_max, (1,)).int().item()
self.queries_ = queries
self.observations_ = observations
if num_chains > 1 and run_in_parallel:
if platform.system() == "Windows":
raise RuntimeError(
"Running inference in parallel is not currently support on Windows"
)
ctx = mp.get_context("fork")
manager = ctx.Manager()
q = manager.Queue()
for chain in range(num_chains):
p = ctx.Process(
target=self._parallel_infer,
args=(
q,
chain,
num_samples,
random_seed,
num_adaptive_samples,
verbose,
),
)
p.start()
chain_queries = [{}] * num_chains
for _ in range(num_chains):
(error, chain, string_dict) = q.get()
if error is not None:
raise error
rv_dict = {rv: string_dict[str(rv)] for rv in queries}
chain_queries[chain] = rv_dict
else:
chain_queries = []
for chain in range(num_chains):
AbstractMCInference.set_seed_for_chain(random_seed, chain)
rv_dicts = self._infer(
num_samples,
num_adaptive_samples,
verbose,
initialize_from_prior,
)
chain_queries.append(rv_dicts)
monte_carlo_samples = MonteCarloSamples(chain_queries, num_adaptive_samples)
self.reset()
return monte_carlo_samples
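# Illustrative call pattern for a concrete subclass of AbstractMCInference (hypothetical
# names, not part of the original file): queries are random-variable identifiers,
# observations map identifiers to observed tensors, and the return value is a
# MonteCarloSamples view over all chains.
#   samples = concrete_inference.infer(queries=[model.mu()],
#                                      observations={model.x(): observed_data},
#                                      num_samples=1000, num_chains=4)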
| [
"torch.manual_seed",
"torch.multiprocessing.get_context",
"torch.randint"
] | 0.1.0 | feynmanliang/beanmachine | 5dea2b9f6387f2f7fd1e53b0915a1b8405f2b46b |
0.1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections.abc import Iterable
from typing import Iterable as IterableType, overload, Type, Union
import torch
import torch.distributions as dist
import torch.distributions.constraints as constraints
from torch.distributions import Distribution
from torch.distributions.transforms import Transform
ConstraintType = Union[constraints.Constraint, Type]
class BetaDimensionTransform(Transform):
"""
Volume preserving transformation to the Beta distribution support.
"""
bijective = True
domain = constraints.real
codomain = constraints.real_vector
def __eq__(self, other):
return isinstance(other, BetaDimensionTransform)
def _call(self, x):
return torch.cat((x.unsqueeze(-1), (1 - x).unsqueeze(-1)), -1)
def _inverse(self, y):
return y[..., 0] / y.sum(dim=-1)
def forward_shape(self, shape):
return shape + (2,)
def inverse_shape(self, shape):
return shape[:-1]
def log_abs_det_jacobian(self, x, y):
return torch.zeros_like(x)
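# Illustrative sketch (not part of the original module): the transform maps a point
# x in (0, 1) to the pair (x, 1 - x) expected by Beta-distribution support checks,
# and its inverse recovers x.
#   t = BetaDimensionTransform()
#   y = t(torch.tensor([0.25, 0.75]))   # -> tensor([[0.25, 0.75], [0.75, 0.25]])
#   x = t.inv(y)                        # -> tensor([0.25, 0.75])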
def _unwrap(constraint: ConstraintType):
if isinstance(constraint, constraints.independent):
return _unwrap(constraint.base_constraint)
return constraint if isinstance(constraint, type) else constraint.__class__
def _is_constraint_eq(constraint1: ConstraintType, constraint2: ConstraintType):
return _unwrap(constraint1) == _unwrap(constraint2)
@overload
def is_constraint_eq(
constraint: ConstraintType, check_constraints: ConstraintType
) -> bool:
...
@overload
def is_constraint_eq(
constraint: ConstraintType, check_constraints: IterableType[ConstraintType]
) -> IterableType[bool]:
...
def is_constraint_eq(
constraint: ConstraintType,
check_constraints: Union[ConstraintType, IterableType[ConstraintType]],
) -> Union[bool, IterableType[bool]]:
"""
This provides an equality check that works for different constraints
specified in :mod:`torch.distributions.constraints`. If `constraint` is
`constraints.Independent`, then the `base_constraint` is checked. If
    `check_constraints` is a single `Constraint` type or instance, this
    returns `True` if the given `constraint` matches `check_constraints`.
Otherwise, if `check_constraints` is an iterable, this returns a `bool`
list that represents an element-wise check.
:param constraint: A constraint class or instance.
:param check_constraints: A constraint class or instance or an iterable
containing constraint classes or instances to check against.
:returns: bool (or a list of bool) values indicating if the given constraint
equals the constraint in `check_constraints`.
"""
if isinstance(check_constraints, Iterable):
return [_is_constraint_eq(constraint, c) for c in check_constraints]
return _is_constraint_eq(constraint, check_constraints)
def get_default_transforms(distribution: Distribution) -> dist.Transform:
"""
Get transforms of a distribution to transform it from constrained space
into unconstrained space.
:param distribution: the distribution to check
:returns: a Transform that need to be applied to the distribution
to transform it from constrained space into unconstrained space
"""
if distribution.support.is_discrete:
return dist.transforms.identity_transform
else:
return dist.biject_to(distribution.support).inv
def initialize_value(distribution: Distribution, initialize_from_prior: bool = False):
"""
Initialized the Variable value
:param initialize_from_prior: if true, returns sample from prior
:returns: the value to the set the Variable value to
"""
sample_val = distribution.sample()
if initialize_from_prior:
return sample_val
support = distribution.support
if isinstance(support, dist.constraints.independent):
support = support.base_constraint
if initialize_from_prior:
return sample_val
elif is_constraint_eq(support, dist.constraints.real):
return torch.zeros_like(sample_val)
elif is_constraint_eq(support, dist.constraints.simplex):
value = torch.ones_like(sample_val)
return value / sample_val.shape[-1]
elif is_constraint_eq(support, dist.constraints.greater_than):
return (
torch.ones(
sample_val.shape, dtype=sample_val.dtype, device=sample_val.device
)
+ support.lower_bound
)
elif is_constraint_eq(support, dist.constraints.boolean):
return dist.Bernoulli(torch.ones_like(sample_val) / 2).sample()
elif is_constraint_eq(support, dist.constraints.interval):
lower_bound = torch.ones_like(sample_val) * support.lower_bound
upper_bound = torch.ones_like(sample_val) * support.upper_bound
return dist.Uniform(lower_bound, upper_bound).sample()
elif is_constraint_eq(support, dist.constraints.integer_interval):
integer_interval = support.upper_bound - support.lower_bound
return dist.Categorical(
(torch.ones(integer_interval, device=sample_val.device)).expand(
sample_val.shape + (integer_interval,)
)
).sample()
elif is_constraint_eq(support, dist.constraints.nonnegative_integer):
return (
torch.ones(
sample_val.shape, dtype=sample_val.dtype, device=sample_val.device
)
+ support.lower_bound
)
return sample_val
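# Illustrative sketch of the deterministic branches above (not part of the original
# module): unconstrained supports initialize to zeros and simplex supports to a
# uniform probability vector.
#   initialize_value(dist.Normal(0.0, 1.0))           # -> tensor(0.)
#   initialize_value(dist.Dirichlet(torch.ones(3)))   # -> tensor([1/3, 1/3, 1/3])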
| [
"torch.distributions.Uniform",
"torch.distributions.biject_to",
"torch.ones",
"torch.ones_like",
"torch.zeros_like"
] | 0.1.0 | feynmanliang/beanmachine | 225114d9964b90c3a49adddc4387b4a47d1b4262 |
1.8 | import argparse
import os
import numpy as np
from torch.utils.data import DataLoader
from . import ImageDataset
from .core import get_inception_feature
def calc_and_save_stats(path, output, batch_size):
dataset = ImageDataset(path, exts=['png', 'jpg'])
loader = DataLoader(dataset, batch_size=batch_size, num_workers=4)
acts, = get_inception_feature(loader, dims=[2048], verbose=True)
mu = np.mean(acts, axis=0)
sigma = np.cov(acts, rowvar=False)
if os.path.dirname(output) != "":
os.makedirs(os.path.dirname(output), exist_ok=True)
np.savez_compressed(output, mu=mu, sigma=sigma)
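# Illustrative programmatic usage (assumption, not part of the original script); this is
# the same computation the CLI entry point below performs:
#   calc_and_save_stats(path='path/to/images', output='stats.npz', batch_size=50)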
if __name__ == '__main__':
parser = argparse.ArgumentParser("Pre-calculate statistics of images")
parser.add_argument("--path", type=str, required=True,
help='path to image directory')
parser.add_argument("--output", type=str, required=True,
help="output path")
parser.add_argument("--batch_size", type=int, default=50,
help="batch size (default=50)")
args = parser.parse_args()
calc_and_save_stats(args.path, args.output, args.batch_size)
| [
"torch.utils.data.DataLoader"
] | 1.8.1 | w86763777/Pytorch-Unified-FID-IS-Score | 6a2620d6da0faa66bb798aa47c7e0e49ef2032b6 |
1.11 | import datetime
import itertools
import os
from typing import Optional
import pytest
import torch
import torch.distributed as dist
import optuna
from optuna.integration import TorchDistributedTrial
from optuna.testing.integration import DeterministicPruner
from optuna.testing.storage import STORAGE_MODES
from optuna.testing.storage import StorageSupplier
@pytest.fixture(scope="session", autouse=True)
def init_process_group() -> None:
if "OMPI_COMM_WORLD_SIZE" not in os.environ:
pytest.skip("This test is expected to be launch with mpirun.")
# This function is automatically called at the beginning of the pytest session.
os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"]
os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"]
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = "20000"
dist.init_process_group("gloo", timeout=datetime.timedelta(seconds=15))
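# Note (assumption, not in the original file): a typical launch would look something like
#   mpirun -n 2 python -m pytest <this test file>
# so that OMPI_COMM_WORLD_SIZE / OMPI_COMM_WORLD_RANK are set for every worker.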
def test_torch_distributed_trial_experimental_warning() -> None:
with pytest.warns(optuna.exceptions.ExperimentalWarning):
if dist.get_rank() == 0:
study = optuna.create_study()
TorchDistributedTrial(study.ask())
else:
TorchDistributedTrial(None)
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
def test_torch_distributed_trial_invalid_argument() -> None:
with pytest.raises(ValueError):
if dist.get_rank() == 0:
TorchDistributedTrial(None)
else:
study = optuna.create_study()
TorchDistributedTrial(study.ask())
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_suggest_float(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
x1 = trial.suggest_float("x", 0, 1)
assert 0 <= x1 <= 1
x2 = trial.suggest_float("x", 0, 1)
assert x1 == x2
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_suggest_uniform(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
x1 = trial.suggest_uniform("x", 0, 1)
assert 0 <= x1 <= 1
x2 = trial.suggest_uniform("x", 0, 1)
assert x1 == x2
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_suggest_loguniform(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
x1 = trial.suggest_loguniform("x", 1e-7, 1)
assert 1e-7 <= x1 <= 1
x2 = trial.suggest_loguniform("x", 1e-7, 1)
assert x1 == x2
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_suggest_discrete_uniform(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
x1 = trial.suggest_discrete_uniform("x", 0, 10, 2)
assert 0 <= x1 <= 10
assert x1 % 2 == 0
x2 = trial.suggest_discrete_uniform("x", 0, 10, 2)
assert x1 == x2
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_suggest_int(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
x1 = trial.suggest_int("x", 0, 10)
assert 0 <= x1 <= 10
x2 = trial.suggest_int("x", 0, 10)
assert x1 == x2
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_suggest_categorical(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
x1 = trial.suggest_categorical("x", ("a", "b", "c"))
assert x1 in {"a", "b", "c"}
x2 = trial.suggest_categorical("x", ("a", "b", "c"))
assert x1 == x2
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_report(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
study: Optional[optuna.study.Study] = None
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
trial.report(1, 0)
if dist.get_rank() == 0:
assert study is not None
            assert study.trials[0].intermediate_values[0] == 1
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_report_nan(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
study: Optional[optuna.study.Study] = None
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
with pytest.raises(TypeError):
trial.report("abc", 0) # type: ignore
if dist.get_rank() == 0:
assert study is not None
assert len(study.trials[0].intermediate_values) == 0
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize(
"storage_mode, is_pruning", itertools.product(STORAGE_MODES, [False, True])
)
def test_should_prune(storage_mode: str, is_pruning: bool) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage, pruner=DeterministicPruner(is_pruning))
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
trial.report(1, 0)
assert trial.should_prune() == is_pruning
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_user_attrs(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
trial.set_user_attr("dataset", "mnist")
trial.set_user_attr("batch_size", 128)
assert trial.user_attrs["dataset"] == "mnist"
assert trial.user_attrs["batch_size"] == 128
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
def test_user_attrs_with_exception() -> None:
with StorageSupplier("sqlite") as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
with pytest.raises(TypeError):
trial.set_user_attr("not serializable", torch.Tensor([1, 2]))
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_system_attrs(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
trial.set_system_attr("dataset", "mnist")
trial.set_system_attr("batch_size", 128)
assert trial.system_attrs["dataset"] == "mnist"
assert trial.system_attrs["batch_size"] == 128
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
def test_system_attrs_with_exception() -> None:
with StorageSupplier("sqlite") as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
with pytest.raises(TypeError):
trial.set_system_attr("not serializable", torch.Tensor([1, 2]))
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_number(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
assert trial.number == 0
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_datetime_start(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
assert isinstance(trial.datetime_start, datetime.datetime)
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_params(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
trial.suggest_float("f", 0, 1)
trial.suggest_int("i", 0, 1)
trial.suggest_categorical("c", ("a", "b", "c"))
params = trial.params
assert 0 <= params["f"] <= 1
assert 0 <= params["i"] <= 1
assert params["c"] in {"a", "b", "c"}
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_distributions(storage_mode: str) -> None:
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
trial.suggest_float("u", 0, 1)
trial.suggest_float("lu", 1e-7, 1, log=True)
trial.suggest_float("du", 0, 1, step=0.5)
trial.suggest_int("i", 0, 1)
trial.suggest_int("il", 1, 128, log=True)
trial.suggest_categorical("c", ("a", "b", "c"))
distributions = trial.distributions
assert distributions["u"] == optuna.distributions.FloatDistribution(0, 1)
assert distributions["lu"] == optuna.distributions.FloatDistribution(1e-7, 1, log=True)
assert distributions["du"] == optuna.distributions.FloatDistribution(0, 1, step=0.5)
assert distributions["i"] == optuna.distributions.IntDistribution(0, 1)
assert distributions["il"] == optuna.distributions.IntDistribution(1, 128, log=True)
assert distributions["c"] == optuna.distributions.CategoricalDistribution(("a", "b", "c"))
@pytest.mark.filterwarnings("ignore::optuna.exceptions.ExperimentalWarning")
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_updates_properties(storage_mode: str) -> None:
"""Check for any distributed deadlock following a property read."""
with StorageSupplier(storage_mode) as storage:
if dist.get_rank() == 0:
study = optuna.create_study(storage=storage)
trial = TorchDistributedTrial(study.ask())
else:
trial = TorchDistributedTrial(None)
trial.suggest_float("f", 0, 1)
trial.suggest_int("i", 0, 1)
trial.suggest_categorical("c", ("a", "b", "c"))
property_names = [
p
for p in dir(TorchDistributedTrial)
if isinstance(getattr(TorchDistributedTrial, p), property)
]
# Rank 0 can read properties without deadlock.
if dist.get_rank() == 0:
[getattr(trial, p) for p in property_names]
dist.barrier()
# Same with rank 1.
if dist.get_rank() == 1:
[getattr(trial, p) for p in property_names]
dist.barrier()
| [
"torch.distributed.get_rank",
"torch.distributed.barrier",
"torch.Tensor"
] | 1.11.0 | masap/optuna | f56cea87c4771d53b39f441e727d733dd1785557 |
1.10 | from typing import Any, Callable, Iterable, List, Optional, Tuple, Union
import warnings
import numpy as np
import pytorch_lightning
from scipy.sparse import csr_matrix
import torch
from torchmetrics import Metric
from torchmetrics.functional import auroc
from tqdm.auto import tqdm
import collie
from collie.interactions import ExplicitInteractions, Interactions, InteractionsDataLoader
from collie.model import BasePipeline
def _get_user_item_pairs(user_ids: Union[np.array, torch.tensor],
n_items: int,
device: Union[str, torch.device]) -> Tuple[torch.tensor, torch.tensor]:
"""
Create tensors pairing each input user ID with each item ID.
Parameters
----------
user_ids: np.array or torch.tensor, 1-d
Iterable[int] of users to score
n_items: int
Number of items in the training data
device: string
Device to store tensors on
Returns
-------
users: torch.tensor, 1-d
Tensor with ``n_items`` copies of each user ID
items: torch.tensor, 1-d
Tensor with ``len(user_ids)`` copies of each item ID
Example
-------
.. code-block:: python
>>> user_ids = np.array([10, 11, 12])
>>> n_items = 4
        >>> user, item = _get_user_item_pairs(user_ids=user_ids, n_items=n_items, device='cpu')
        >>> user
        tensor([10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12])
        >>> item
        tensor([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3])
"""
# Added because sometimes we call this function with ``n_items`` as ``np.int64`` type which
# breaks ``repeat_interleave``.
if isinstance(n_items, np.int64):
n_items = n_items.item()
users = torch.tensor(
user_ids,
dtype=torch.int64,
requires_grad=False,
device=device,
).repeat_interleave(n_items)
items = torch.arange(
start=0,
end=n_items,
requires_grad=False,
device=device,
).repeat(len(user_ids))
return users, items
def get_preds(model: BasePipeline,
user_ids: Union[np.array, torch.tensor],
n_items: int,
device: Union[str, torch.device]) -> torch.tensor:
"""
Returns a ``n_users x n_items`` tensor with the item IDs of recommended products for each user
ID.
Parameters
----------
model: collie.model.BasePipeline
Model that can take a (user_id, item_id) pair as input and return a recommendation score
user_ids: np.array or torch.tensor
Iterable[int] of users to score
n_items: int
Number of items in the training data
device: string
Device torch should use
Returns
-------
predicted_scores: torch.tensor
Tensor of shape ``n_users x n_items``
"""
user, item = _get_user_item_pairs(user_ids, n_items, device)
with torch.no_grad():
predicted_scores = model(user, item)
return predicted_scores.view(-1, n_items)
def _get_labels(targets: csr_matrix,
user_ids: Union[np.array, torch.tensor],
preds: Union[np.array, torch.tensor],
device: str) -> torch.tensor:
"""
Returns a binary array indicating which of the recommended products are in each user's target
set.
Parameters
----------
targets: scipy.sparse.csr_matrix
Interaction matrix containing user and item IDs
user_ids: np.array or torch.tensor
Users corresponding to the recommendations in the top k predictions
preds: torch.tensor
Top ``k`` item IDs to recommend to each user of shape (n_users x k)
device: string
Device torch should use
Returns
-------
labels: torch.tensor
Tensor with the same dimensions as input ``preds``
"""
return torch.tensor(
(targets[user_ids[:, None], np.array(preds.detach().cpu())] > 0)
.astype('double')
.toarray(),
requires_grad=False,
device=device,
)
def mapk(targets: csr_matrix,
user_ids: Union[np.array, torch.tensor],
preds: Union[np.array, torch.tensor],
k: int = 10) -> float:
"""
Calculate the mean average precision at K (MAP@K) score for each user.
Parameters
----------
targets: scipy.sparse.csr_matrix
Interaction matrix containing user and item IDs
user_ids: np.array or torch.tensor
Users corresponding to the recommendations in the top k predictions
preds: torch.tensor
Tensor of shape (n_users x n_items) with each user's scores for each item
k: int
Number of recommendations to consider per user
Returns
-------
mapk_score: float
"""
device = preds.device
n_users = preds.shape[0]
try:
predicted_items = preds.topk(k, dim=1).indices
except RuntimeError as e:
raise ValueError(
f'Ensure ``k`` ({k}) is less than the number of items ({preds.shape[1]}):', str(e)
)
topk_labeled = _get_labels(targets, user_ids, predicted_items, device)
accuracy = topk_labeled.int()
weights = (
1.0 / torch.arange(
start=1,
end=k+1,
dtype=torch.float64,
requires_grad=False,
device=device
)
).repeat(n_users, 1)
denominator = torch.min(
torch.tensor(k, device=device, dtype=torch.int).repeat(len(user_ids)),
torch.tensor(targets[user_ids].getnnz(axis=1), device=device)
)
res = ((accuracy * accuracy.cumsum(axis=1) * weights).sum(axis=1)) / denominator
res[torch.isnan(res)] = 0
return res.mean().item()
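# Illustrative sketch (assumption, not part of the original module): a single user whose
# only relevant item is ranked first out of three candidates gets MAP@2 of 1.0.
#   targets = csr_matrix(np.array([[1, 0, 0]]))
#   preds = torch.tensor([[0.9, 0.2, 0.1]])
#   mapk(targets, np.array([0]), preds, k=2)   # -> 1.0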
def mrr(targets: csr_matrix,
user_ids: Union[np.array, torch.tensor],
preds: Union[np.array, torch.tensor],
k: Optional[Any] = None) -> float:
"""
Calculate the mean reciprocal rank (MRR) of the input predictions.
Parameters
----------
targets: scipy.sparse.csr_matrix
Interaction matrix containing user and item IDs
user_ids: np.array or torch.tensor
Users corresponding to the recommendations in the top k predictions
preds: torch.tensor
Tensor of shape (n_users x n_items) with each user's scores for each item
k: Any
Ignored, included only for compatibility with ``mapk``
Returns
-------
mrr_score: float
"""
predicted_items = preds.topk(preds.shape[1], dim=1).indices
labeled = _get_labels(targets, user_ids, predicted_items, device=preds.device)
    # weighting each 0/1 by position so that topk returns index of *first* positive result
position_weight = 1.0/(
torch.arange(1, targets.shape[1] + 1, device=preds.device)
.repeat(len(user_ids), 1)
.float()
)
labeled_weighted = (labeled.float() * position_weight)
highest_score, rank = labeled_weighted.topk(k=1)
reciprocal_rank = 1.0/(rank.float() + 1)
reciprocal_rank[highest_score == 0] = 0
return reciprocal_rank.mean().item()
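# Illustrative note (assumption): with the same single-user example used for ``mapk``
# above, the first relevant item sits at rank 1, so ``mrr`` returns 1.0; had it been
# ranked second, the reciprocal rank (and hence the mean) would be 0.5.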
def auc(targets: csr_matrix,
user_ids: Union[np.array, torch.tensor],
preds: Union[np.array, torch.tensor],
k: Optional[Any] = None) -> float:
"""
Calculate the area under the ROC curve (AUC) for each user and average the results.
Parameters
----------
targets: scipy.sparse.csr_matrix
Interaction matrix containing user and item IDs
user_ids: np.array or torch.tensor
Users corresponding to the recommendations in the top k predictions
preds: torch.tensor
Tensor of shape (n_users x n_items) with each user's scores for each item
k: Any
Ignored, included only for compatibility with ``mapk``
Returns
-------
auc_score: float
"""
agg = 0
for i, user_id in enumerate(user_ids):
target_tensor = torch.tensor(
targets[user_id].toarray(),
device=preds.device,
dtype=torch.long
).view(-1)
# many models' ``preds`` may be unbounded if a final activation layer is not applied
# we have to normalize ``preds`` here to avoid a ``ValueError`` stating that ``preds``
# should be probabilities, but values were detected outside of [0,1] range
auc = auroc(torch.sigmoid(preds[i, :]), target=target_tensor, pos_label=1)
agg += auc
return (agg/len(user_ids)).item()
def evaluate_in_batches(
metric_list: Iterable[Callable[
[csr_matrix, Union[np.array, torch.tensor], Union[np.array, torch.tensor], Optional[int]],
float
]],
test_interactions: collie.interactions.Interactions,
model: collie.model.BasePipeline,
k: int = 10,
batch_size: int = 20,
logger: pytorch_lightning.loggers.base.LightningLoggerBase = None,
verbose: bool = True,
) -> List[float]:
"""
Evaluate a model with potentially several different metrics.
Memory constraints require that most test sets will need to be evaluated in batches. This
function handles the looping and batching boilerplate needed to properly evaluate the model
without running out of memory.
Parameters
----------
metric_list: list of functions
List of evaluation functions to apply. Each function must accept keyword arguments:
* ``targets``
* ``user_ids``
* ``preds``
* ``k``
test_interactions: collie.interactions.Interactions
Interactions to use as labels
model: collie.model.BasePipeline
Model that can take a (user_id, item_id) pair as input and return a recommendation score
k: int
Number of recommendations to consider per user. This is ignored by some metrics
batch_size: int
Number of users to score in a single batch. For best efficiency, this number should be as
high as possible without running out of memory
logger: pytorch_lightning.loggers.base.LightningLoggerBase
If provided, will log outputted metrics dictionary using the ``log_metrics`` method with
keys being the string representation of ``metric_list`` and values being
``evaluation_results``. Additionally, if ``model.hparams.num_epochs_completed`` exists, this
will be logged as well, making it possible to track metrics progress over the course of
model training
verbose: bool
Display progress bar and print statements during function execution
Returns
-------
evaluation_results: list
List of floats, with each metric value corresponding to the respective function passed in
``metric_list``
Examples
--------
.. code-block:: python
from collie.metrics import auc, evaluate_in_batches, mapk, mrr
map_10_score, mrr_score, auc_score = evaluate_in_batches(
metric_list=[mapk, mrr, auc],
test_interactions=test,
model=model,
)
print(map_10_score, mrr_score, auc_score)
"""
if not isinstance(test_interactions, Interactions):
raise ValueError(
'``test_interactions`` must be of type ``Interactions``, not '
f'{type(test_interactions)}. Try using ``explicit_evaluate_in_batches`` instead.'
)
device = _get_evaluate_in_batches_device(model=model)
model.to(device)
model._move_any_external_data_to_device()
test_users = np.unique(test_interactions.mat.row)
targets = test_interactions.mat.tocsr()
if len(test_users) < batch_size:
batch_size = len(test_users)
accumulators = [0] * len(metric_list)
data_to_iterate_over = range(int(np.ceil(len(test_users) / batch_size)))
if verbose:
data_to_iterate_over = tqdm(data_to_iterate_over)
for i in data_to_iterate_over:
user_range = test_users[i * batch_size:(i + 1) * batch_size]
preds = get_preds(model, user_range, test_interactions.num_items, device)
for metric_ind, metric in enumerate(metric_list):
score = metric(targets=targets, user_ids=user_range, preds=preds, k=k)
accumulators[metric_ind] += (score * len(user_range))
all_scores = [acc_score / len(test_users) for acc_score in accumulators]
if logger is not None:
_log_metrics(model=model,
logger=logger,
metric_list=metric_list,
all_scores=all_scores,
verbose=verbose)
return all_scores[0] if len(all_scores) == 1 else all_scores
def explicit_evaluate_in_batches(
metric_list: Iterable[Metric],
test_interactions: collie.interactions.ExplicitInteractions,
model: collie.model.BasePipeline,
logger: pytorch_lightning.loggers.base.LightningLoggerBase = None,
verbose: bool = True,
**kwargs,
) -> List[float]:
"""
Evaluate a model with potentially several different metrics.
Memory constraints require that most test sets will need to be evaluated in batches. This
function handles the looping and batching boilerplate needed to properly evaluate the model
without running out of memory.
Parameters
----------
metric_list: list of ``torchmetrics.Metric``
List of evaluation functions to apply. Each function must accept arguments for predictions
and targets, in order
test_interactions: collie.interactions.ExplicitInteractions
model: collie.model.BasePipeline
Model that can take a (user_id, item_id) pair as input and return a recommendation score
batch_size: int
Number of users to score in a single batch. For best efficiency, this number should be as
high as possible without running out of memory
logger: pytorch_lightning.loggers.base.LightningLoggerBase
If provided, will log outputted metrics dictionary using the ``log_metrics`` method with
keys being the string representation of ``metric_list`` and values being
``evaluation_results``. Additionally, if ``model.hparams.num_epochs_completed`` exists, this
will be logged as well, making it possible to track metrics progress over the course of
model training
verbose: bool
Display progress bar and print statements during function execution
**kwargs: keyword arguments
Additional arguments sent to the ``InteractionsDataLoader``
Returns
    -------
evaluation_results: list
List of floats, with each metric value corresponding to the respective function passed in
``metric_list``
Examples
    --------
.. code-block:: python
import torchmetrics
from collie.metrics import explicit_evaluate_in_batches
            mse_score, mae_score = explicit_evaluate_in_batches(
metric_list=[torchmetrics.MeanSquaredError(), torchmetrics.MeanAbsoluteError()],
test_interactions=test,
model=model,
)
print(mse_score, mae_score)
"""
if not isinstance(test_interactions, ExplicitInteractions):
raise ValueError(
'``test_interactions`` must be of type ``ExplicitInteractions``, not '
f'{type(test_interactions)}. Try using ``evaluate_in_batches`` instead.'
)
try:
device = _get_evaluate_in_batches_device(model=model)
model.to(device)
model._move_any_external_data_to_device()
test_loader = InteractionsDataLoader(interactions=test_interactions,
**kwargs)
data_to_iterate_over = test_loader
if verbose:
data_to_iterate_over = tqdm(test_loader)
for batch in data_to_iterate_over:
users, items, ratings = batch
# move data to batch before sending to model
users = users.to(device)
items = items.to(device)
ratings = ratings.cpu()
preds = model(users, items)
for metric in metric_list:
metric(preds.cpu(), ratings)
all_scores = [metric.compute() for metric in metric_list]
if logger is not None:
_log_metrics(model=model,
logger=logger,
metric_list=metric_list,
all_scores=all_scores,
verbose=verbose)
return all_scores[0] if len(all_scores) == 1 else all_scores
finally:
for metric in metric_list:
metric.reset()
def _get_evaluate_in_batches_device(model: BasePipeline):
device = getattr(model, 'device', None)
if torch.cuda.is_available() and str(device) == 'cpu':
warnings.warn('CUDA available but model device is set to CPU - is this desired?')
if device is None:
if torch.cuda.is_available():
warnings.warn(
'``model.device`` attribute is ``None``. Since GPU is available, putting model on '
'GPU.'
)
device = 'cuda:0'
else:
device = 'cpu'
return device
def _log_metrics(model: BasePipeline,
logger: pytorch_lightning.loggers.base.LightningLoggerBase,
metric_list: List[Union[Callable[..., Any], Metric]],
all_scores: List[float],
verbose: bool):
try:
step = model.hparams.get('num_epochs_completed')
except torch.nn.modules.module.ModuleAttributeError:
# if, somehow, there is no ``model.hparams`` attribute, this shouldn't fail
step = None
try:
metrics_dict = dict(zip([x.__name__ for x in metric_list], all_scores))
except AttributeError:
metrics_dict = dict(zip([type(x).__name__ for x in metric_list], all_scores))
if verbose:
print(f'Logging metrics {metrics_dict} to ``logger``...')
logger.log_metrics(metrics=metrics_dict, step=step)
| [
"torch.sigmoid",
"torch.arange",
"torch.isnan",
"torch.no_grad",
"torch.cuda.is_available",
"torch.tensor"
] | 1.10.1 | RomaKoks/collie_recs | bc8979c8dbf68deefb030336d50f07f788cf1667 |
1.8 | """
Description:
Author: Jiaqi Gu ([email protected])
Date: 2021-06-07 03:43:40
LastEditors: Jiaqi Gu ([email protected])
LastEditTime: 2021-06-07 03:43:40
"""
from torchonn.op.mzi_op import project_matrix_to_unitary
from typing import List, Union
import torch
from torch import Tensor, nn
from torch.types import Device, _size
from torchonn.layers import MZIBlockConv2d, MZIBlockLinear
from torchonn.models import ONNBaseModel
from collections import OrderedDict
__all__ = ["MZI_CLASS_CNN"]
class ConvBlock(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int = 3,
stride: Union[int, _size] = 1,
padding: Union[int, _size] = 0,
dilation: _size = 1,
groups: int = 1,
bias: bool = False,
miniblock: int = 8,
mode: str = "weight",
decompose_alg: str = "clements",
photodetect: bool = False,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.conv = MZIBlockConv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
miniblock=miniblock,
mode=mode,
decompose_alg=decompose_alg,
photodetect=photodetect,
device=device,
)
self.bn = nn.BatchNorm2d(out_channels)
self.activation = nn.ReLU(inplace=True)
def forward(self, x: Tensor) -> Tensor:
return self.activation(self.bn(self.conv(x)))
class LinearBlock(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = False,
miniblock: int = 8,
mode: str = "weight",
decompose_alg: str = "clements",
photodetect: bool = False,
activation: bool = True,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.linear = MZIBlockLinear(
in_features,
out_features,
bias=bias,
miniblock=miniblock,
mode=mode,
decompose_alg=decompose_alg,
photodetect=photodetect,
device=device,
)
self.activation = nn.ReLU(inplace=True) if activation else None
def forward(self, x: Tensor) -> Tensor:
x = self.linear(x)
if self.activation is not None:
x = self.activation(x)
return x
class MZI_CLASS_CNN(ONNBaseModel):
"""
MZI CNN for classification.
    It uses blocked matrix multiplication, which is much faster and more scalable than implementing the entire weight matrix on a single MZI array.
    Each block is implemented by a square MZI array.
"""
_conv_linear = (MZIBlockConv2d, MZIBlockLinear)
_conv = (MZIBlockConv2d,)
_linear = (MZIBlockLinear,)
def __init__(
self,
img_height: int,
img_width: int,
in_channels: int,
num_classes: int,
kernel_list: List[int] = [32],
kernel_size_list: List[int] = [3],
stride_list: List[int] = [1],
padding_list: List[int] = [1],
dilation_list: List[int] = [1],
pool_out_size: int = 5,
hidden_list: List[int] = [32],
block_list: List[int] = [8],
mode: str = "usv",
decompose_alg: str = "clements",
photodetect: bool = True,
bias: bool = False,
device: Device = torch.device("cuda"),
) -> None:
super().__init__()
self.img_height = img_height
self.img_width = img_width
self.in_channels = in_channels
self.num_classes = num_classes
self.kernel_list = kernel_list
self.kernel_size_list = kernel_size_list
self.stride_list = stride_list
self.padding_list = padding_list
self.dilation_list = dilation_list
self.pool_out_size = pool_out_size
self.hidden_list = hidden_list
self.block_list = block_list
self.mode = mode
self.decompose_alg = decompose_alg
self.photodetect = photodetect
self.bias = bias
self.device = device
self.build_layers()
self.reset_parameters()
def build_layers(self):
self.features = OrderedDict()
for idx, out_channels in enumerate(self.kernel_list, 0):
layer_name = "conv" + str(idx + 1)
in_channels = self.in_channels if (idx == 0) else self.kernel_list[idx - 1]
self.features[layer_name] = ConvBlock(
in_channels,
out_channels,
kernel_size=self.kernel_size_list[idx],
stride=self.stride_list[idx],
padding=self.padding_list[idx],
dilation=self.dilation_list[idx],
groups=1,
bias=self.bias,
miniblock=self.block_list[idx],
mode=self.mode,
decompose_alg=self.decompose_alg,
photodetect=self.photodetect,
device=self.device,
)
self.features = nn.Sequential(self.features)
if self.pool_out_size > 0:
self.pool2d = nn.AdaptiveAvgPool2d(self.pool_out_size)
feature_size = self.kernel_list[-1] * self.pool_out_size * self.pool_out_size
else:
self.pool2d = None
img_height, img_width = self.img_height, self.img_width
for layer in self.modules():
if isinstance(layer, self._conv):
img_height, img_width = layer.get_output_dim(img_height, img_width)
feature_size = img_height * img_width * self.kernel_list[-1]
self.classifier = OrderedDict()
for idx, hidden_dim in enumerate(self.hidden_list, 0):
layer_name = "fc" + str(idx + 1)
in_channel = feature_size if idx == 0 else self.hidden_list[idx - 1]
out_channel = hidden_dim
self.classifier[layer_name] = LinearBlock(
in_channel,
out_channel,
bias=self.bias,
miniblock=self.block_list[idx + len(self.kernel_list)],
mode=self.mode,
decompose_alg=self.decompose_alg,
photodetect=self.photodetect,
activation=True,
device=self.device,
)
layer_name = "fc" + str(len(self.hidden_list) + 1)
self.classifier[layer_name] = LinearBlock(
self.hidden_list[-1] if len(self.hidden_list) > 0 else feature_size,
self.num_classes,
bias=self.bias,
miniblock=self.block_list[-1],
mode=self.mode,
decompose_alg=self.decompose_alg,
photodetect=self.photodetect,
activation=False,
device=self.device,
)
self.classifier = nn.Sequential(self.classifier)
def unitary_projection(self) -> None:
assert self.mode == "usv", "Unitary projection can only be applied in usv mode"
for m in self.modules():
if isinstance(m, self._conv_linear):
m.U.data.copy_(project_matrix_to_unitary(m.U.data))
m.V.data.copy_(project_matrix_to_unitary(m.V.data))
def forward(self, x: Tensor) -> Tensor:
x = self.features(x)
if self.pool2d is not None:
x = self.pool2d(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
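# Illustrative instantiation sketch (assumption, not part of the original file):
#   model = MZI_CLASS_CNN(img_height=28, img_width=28, in_channels=1, num_classes=10,
#                         device=torch.device("cpu"))
#   logits = model(torch.randn(4, 1, 28, 28))   # expected shape: (4, 10)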
| [
"torch.device",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.flatten"
] | 1.8.0 | JeremieMelo/pytorch-onn | 670996112277a6c19c7da400afbe0a4ce45ad5de |
1.1 | import argparse
import torch
from deep_rl import random_seed, set_one_thread, select_device, Config, generate_tag, Task, TDAuxNet, NatureConvBody, \
LinearSchedule, AsyncReplay, ImageNormalizer, SignNormalizer, run_steps, mkdir
from deep_rl.agent.TDAux_agent import TDAuxAgent
import os
def td_aux_many(config: Config, **kwargs):
"""
:param config:
:param kwargs: kwargs used to generate the experiment tag name uses for saving.
:return:
"""
generate_tag(kwargs)
kwargs.setdefault('log_level', 0)
config.merge(kwargs)
mkdir(os.path.join(config.data_dir, 'log'))
mkdir(os.path.join(config.data_dir, 'data'))
config.task_fn = lambda: Task(config.game)
config.eval_env = config.task_fn()
# aux_gammas = [0.0, 0.5, 0.9, 0.99]
aux_gammas = [0.99]
aux_dict = {str(g).replace(".", "_"): TDAuxNet.AuxCFG(g, loss_weight=10.0) for g in aux_gammas}
# aux_dict = {}
# config.optimizer_fn = lambda params: torch.optim.RMSprop(
# params, lr=0.00025, alpha=0.95, eps=0.01, centered=True)
config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=1e-4)
# I'm just hard coding the shape of the target
config.network_fn = lambda: TDAuxNet((84, 84), config.action_dim,
NatureConvBody(in_channels=config.history_length), aux_dict)
config.random_action_prob = LinearSchedule(1.0, 0.01, 1e6)
config.replay_fn = lambda: AsyncReplay(memory_size=int(5e5), batch_size=32)
config.batch_size = 32
config.state_normalizer = ImageNormalizer()
config.reward_normalizer = SignNormalizer()
config.discount = 0.99
config.target_network_update_freq = 10000
config.exploration_steps = 50000
config.sgd_update_frequency = 4
config.gradient_clip = 5
config.history_length = 4
# config.double_q = True
config.double_q = False
run_steps(TDAuxAgent(config))
if __name__ == "__main__":
cf = Config()
cf.add_argument('--game', required=True)
cf.add_argument('--run', type=int, required=True)
cf.add_argument('--data_dir', type=str, required=True)
cf.add_argument('--save_interval', type=int, default=1000000)
cf.add_argument('--max_steps', type=int, default=int(2.5e7))
cf.merge()
set_one_thread()
select_device(0)
td_aux_many(cf, game=cf.game, run=cf.run, remark="aux_0.99_10.0")
| [
"torch.optim.Adam"
] | 1.1.0 | csherstan/DeepRL | fbf8da1f158792a0b9d29728c9d407ae40573070 |
1.3 | """
Entry point for training and evaluating a dependency parser.
This implementation combines a deep biaffine graph-based parser with linearization and distance features.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
"""
Training and evaluation for the parser.
"""
import sys
import os
import shutil
import time
import argparse
import logging
import numpy as np
import random
import torch
from torch import nn, optim
import stanza.models.depparse.data as data
from stanza.models.depparse.data import DataLoader
from stanza.models.depparse.trainer import Trainer
from stanza.models.depparse import scorer
from stanza.models.common import utils
from stanza.models.common import pretrain
from stanza.models.common.data import augment_punct
from stanza.models.common.doc import *
from stanza.utils.conll import CoNLL
from stanza.models import _training_logging
logger = logging.getLogger('stanza')
def parse_args(args=None):
parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='data/depparse', help='Root dir for data files.')
parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors.')
parser.add_argument('--wordvec_file', type=str, default=None, help='Word vectors filename.')
parser.add_argument('--wordvec_pretrain_file', type=str, default=None, help='Exact name of the pretrain file to read')
parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
    parser.add_argument('--gold_file', type=str, default=None, help='Gold CoNLL-U file.')
parser.add_argument('--mode', default='train', choices=['train', 'predict'])
parser.add_argument('--lang', type=str, help='Language')
parser.add_argument('--shorthand', type=str, help="Treebank shorthand")
parser.add_argument('--hidden_dim', type=int, default=400)
parser.add_argument('--char_hidden_dim', type=int, default=400)
parser.add_argument('--deep_biaff_hidden_dim', type=int, default=400)
parser.add_argument('--composite_deep_biaff_hidden_dim', type=int, default=100)
parser.add_argument('--word_emb_dim', type=int, default=75)
parser.add_argument('--char_emb_dim', type=int, default=100)
parser.add_argument('--tag_emb_dim', type=int, default=50)
parser.add_argument('--transformed_dim', type=int, default=125)
parser.add_argument('--num_layers', type=int, default=3)
parser.add_argument('--char_num_layers', type=int, default=1)
parser.add_argument('--pretrain_max_vocab', type=int, default=250000)
parser.add_argument('--word_dropout', type=float, default=0.33)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--rec_dropout', type=float, default=0, help="Recurrent dropout")
parser.add_argument('--char_rec_dropout', type=float, default=0, help="Recurrent dropout")
parser.add_argument('--no_char', dest='char', action='store_false', help="Turn off character model.")
parser.add_argument('--no_pretrain', dest='pretrain', action='store_false', help="Turn off pretrained embeddings.")
parser.add_argument('--no_linearization', dest='linearization', action='store_false', help="Turn off linearization term.")
parser.add_argument('--no_distance', dest='distance', action='store_false', help="Turn off distance term.")
parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--lr', type=float, default=3e-3, help='Learning rate')
parser.add_argument('--beta2', type=float, default=0.95)
parser.add_argument('--max_steps', type=int, default=50000)
parser.add_argument('--eval_interval', type=int, default=100)
parser.add_argument('--max_steps_before_stop', type=int, default=3000)
parser.add_argument('--batch_size', type=int, default=5000)
parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--save_dir', type=str, default='saved_models/depparse', help='Root dir for saving models.')
parser.add_argument('--save_name', type=str, default=None, help="File name to save the model")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
parser.add_argument('--augment_nopunct', type=float, default=None, help='Augment the training data by copying this fraction of punct-ending sentences as non-punct. Default of None will aim for roughly 10%')
args = parser.parse_args(args=args)
return args
def main(args=None):
args = parse_args(args=args)
if args.cpu:
args.cuda = False
utils.set_random_seed(args.seed, args.cuda)
args = vars(args)
logger.info("Running parser in {} mode".format(args['mode']))
if args['mode'] == 'train':
train(args)
else:
evaluate(args)
# TODO: refactor with tagger
def model_file_name(args):
if args['save_name'] is not None:
save_name = args['save_name']
else:
save_name = args['shorthand'] + "_parser.pt"
return os.path.join(args['save_dir'], save_name)
# TODO: refactor with everywhere
def load_pretrain(args):
pt = None
if args['pretrain']:
pretrain_file = pretrain.find_pretrain_file(args['wordvec_pretrain_file'], args['save_dir'], args['shorthand'], args['lang'])
if os.path.exists(pretrain_file):
vec_file = None
else:
vec_file = args['wordvec_file'] if args['wordvec_file'] else utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])
pt = pretrain.Pretrain(pretrain_file, vec_file, args['pretrain_max_vocab'])
return pt
def train(args):
model_file = model_file_name(args)
utils.ensure_dir(os.path.split(model_file)[0])
# load pretrained vectors if needed
pretrain = load_pretrain(args)
# load data
logger.info("Loading data with batch size {}...".format(args['batch_size']))
train_data, _ = CoNLL.conll2dict(input_file=args['train_file'])
# possibly augment the training data with some amount of fake data
# based on the options chosen
logger.info("Original data size: {}".format(len(train_data)))
train_data.extend(augment_punct(train_data, args['augment_nopunct'],
keep_original_sentences=False))
logger.info("Augmented data size: {}".format(len(train_data)))
train_doc = Document(train_data)
train_batch = DataLoader(train_doc, args['batch_size'], args, pretrain, evaluation=False)
vocab = train_batch.vocab
dev_doc = CoNLL.conll2doc(input_file=args['eval_file'])
dev_batch = DataLoader(dev_doc, args['batch_size'], args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
# pred and gold path
system_pred_file = args['output_file']
gold_file = args['gold_file']
# skip training if the language does not have training or dev data
if len(train_batch) == 0 or len(dev_batch) == 0:
logger.info("Skip training because no data available...")
sys.exit(0)
logger.info("Training parser...")
trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])
global_step = 0
max_steps = args['max_steps']
dev_score_history = []
best_dev_preds = []
current_lr = args['lr']
global_start_time = time.time()
format_str = 'Finished STEP {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
using_amsgrad = False
last_best_step = 0
# start training
train_loss = 0
while True:
do_break = False
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss = trainer.update(batch, eval=False) # update step
train_loss += loss
if global_step % args['log_step'] == 0:
duration = time.time() - start_time
logger.info(format_str.format(global_step, max_steps, loss, duration, current_lr))
if global_step % args['eval_interval'] == 0:
# eval on dev
logger.info("Evaluating on dev set...")
dev_preds = []
for batch in dev_batch:
preds = trainer.predict(batch)
dev_preds += preds
dev_preds = utils.unsort(dev_preds, dev_batch.data_orig_idx)
dev_batch.doc.set([HEAD, DEPREL], [y for x in dev_preds for y in x])
CoNLL.write_doc2conll(dev_batch.doc, system_pred_file)
_, _, dev_score = scorer.score(system_pred_file, gold_file)
train_loss = train_loss / args['eval_interval'] # avg loss per batch
logger.info("step {}: train_loss = {:.6f}, dev_score = {:.4f}".format(global_step, train_loss, dev_score))
train_loss = 0
# save best model
if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
last_best_step = global_step
trainer.save(model_file)
logger.info("new best model saved.")
best_dev_preds = dev_preds
dev_score_history += [dev_score]
if global_step - last_best_step >= args['max_steps_before_stop']:
if not using_amsgrad:
logger.info("Switching to AMSGrad")
last_best_step = global_step
using_amsgrad = True
trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)
else:
do_break = True
break
if global_step >= args['max_steps']:
do_break = True
break
if do_break: break
train_batch.reshuffle()
logger.info("Training ended with {} steps.".format(global_step))
best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1
logger.info("Best dev F1 = {:.2f}, at iteration = {}".format(best_f, best_eval * args['eval_interval']))
def evaluate(args):
# file paths
system_pred_file = args['output_file']
gold_file = args['gold_file']
model_file = model_file_name(args)
# load pretrained vectors if needed
pretrain = load_pretrain(args)
# load model
logger.info("Loading model from: {}".format(model_file))
use_cuda = args['cuda'] and not args['cpu']
trainer = Trainer(pretrain=pretrain, model_file=model_file, use_cuda=use_cuda)
loaded_args, vocab = trainer.args, trainer.vocab
# load config
for k in args:
if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':
loaded_args[k] = args[k]
# load data
logger.info("Loading data with batch size {}...".format(args['batch_size']))
doc = CoNLL.conll2doc(input_file=args['eval_file'])
batch = DataLoader(doc, args['batch_size'], loaded_args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
if len(batch) > 0:
logger.info("Start evaluation...")
preds = []
for i, b in enumerate(batch):
preds += trainer.predict(b)
else:
# skip eval if dev data does not exist
preds = []
preds = utils.unsort(preds, batch.data_orig_idx)
# write to file and score
batch.doc.set([HEAD, DEPREL], [y for x in preds for y in x])
CoNLL.write_doc2conll(batch.doc, system_pred_file)
if gold_file is not None:
_, _, score = scorer.score(system_pred_file, gold_file)
logger.info("Parser score:")
logger.info("{} {:.2f}".format(args['shorthand'], score*100))
if __name__ == '__main__':
main()
| [
"torch.cuda.is_available"
] | 1.3.0 | asears/stanza | f91ca215e175d4f7b202259fe789374db7829395 |
1.3 | import torch
import torch.nn as nn
class LangIDBiLSTM(nn.Module):
"""
Multi-layer BiLSTM model for language detecting. A recreation of "A reproduction of Apple's bi-directional LSTM models
for language identification in short strings." (Toftrup et al 2021)
Arxiv: https://arxiv.org/abs/2102.06282
GitHub: https://github.com/AU-DIS/LSTM_langid
"""
def __init__(self, char_to_idx, tag_to_idx, num_layers, embedding_dim, hidden_dim, batch_size=64, weights=None,
dropout=0.0, lang_subset=None):
super(LangIDBiLSTM, self).__init__()
self.num_layers = num_layers
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.char_to_idx = char_to_idx
self.vocab_size = len(char_to_idx)
self.tag_to_idx = tag_to_idx
self.idx_to_tag = [i[1] for i in sorted([(v,k) for k,v in self.tag_to_idx.items()])]
self.lang_subset = lang_subset
self.padding_idx = char_to_idx["<PAD>"]
self.tagset_size = len(tag_to_idx)
self.batch_size = batch_size
self.loss_train = nn.CrossEntropyLoss(weight=weights)
self.dropout_prob = dropout
# embeddings for chars
self.char_embeds = nn.Embedding(
num_embeddings=self.vocab_size,
embedding_dim=self.embedding_dim,
padding_idx=self.padding_idx
)
# the bidirectional LSTM
self.lstm = nn.LSTM(
self.embedding_dim,
self.hidden_dim,
num_layers=self.num_layers,
bidirectional=True,
batch_first=True
)
# convert output to tag space
self.hidden_to_tag = nn.Linear(
self.hidden_dim * 2,
self.tagset_size
)
# dropout layer
self.dropout = nn.Dropout(p=self.dropout_prob)
def build_lang_mask(self, use_gpu=None):
"""
Build language mask if a lang subset is specified (e.g. ["en", "fr"])
"""
device = torch.device("cuda") if use_gpu else None
lang_mask_list = [int(lang in self.lang_subset) for lang in self.idx_to_tag] if self.lang_subset else \
[1 for lang in self.idx_to_tag]
self.lang_mask = torch.tensor(lang_mask_list, device=device, dtype=torch.float)
def loss(self, Y_hat, Y):
return self.loss_train(Y_hat, Y)
def forward(self, x):
# embed input
x = self.char_embeds(x)
# run through LSTM
x, _ = self.lstm(x)
# run through linear layer
x = self.hidden_to_tag(x)
# sum character outputs for each sequence
x = torch.sum(x, dim=1)
return x
def prediction_scores(self, x):
prediction_probs = self(x)
if self.lang_subset:
prediction_batch_size = prediction_probs.size()[0]
batch_mask = torch.stack([self.lang_mask for _ in range(prediction_batch_size)])
prediction_probs = prediction_probs * batch_mask
return torch.argmax(prediction_probs, dim=1)
def save(self, path):
""" Save a model at path """
checkpoint = {
"char_to_idx": self.char_to_idx,
"tag_to_idx": self.tag_to_idx,
"num_layers": self.num_layers,
"embedding_dim": self.embedding_dim,
"hidden_dim": self.hidden_dim,
"model_state_dict": self.state_dict()
}
torch.save(checkpoint, path)
@classmethod
def load(cls, path, use_cuda=False, batch_size=64, lang_subset=None):
""" Load a serialized model located at path """
if use_cuda:
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
else:
device = torch.device("cpu")
checkpoint = torch.load(path, map_location=torch.device("cpu"))
weights = checkpoint["model_state_dict"]["loss_train.weight"]
model = cls(checkpoint["char_to_idx"], checkpoint["tag_to_idx"], checkpoint["num_layers"],
checkpoint["embedding_dim"], checkpoint["hidden_dim"], batch_size=batch_size, weights=weights,
lang_subset=lang_subset)
model.load_state_dict(checkpoint["model_state_dict"])
if use_cuda:
model.to(torch.device("cuda"))
model.build_lang_mask(use_gpu=use_cuda)
return model
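# --- Hedged usage sketch (editor addition, not part of the upstream source) ---
# Minimal inference example, assuming a saved checkpoint and a batch already
# encoded with the model's char_to_idx vocabulary; the model path is a placeholder.
def _langid_inference_sketch(model_path, encoded_batch):
    # encoded_batch: LongTensor of shape [batch, seq_len] built from char_to_idx
    model = LangIDBiLSTM.load(model_path, use_cuda=False)
    model.eval()
    with torch.no_grad():
        tag_indices = model.prediction_scores(encoded_batch)
    return [model.idx_to_tag[i] for i in tag_indices.tolist()]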
| [
"torch.nn.Linear",
"torch.device",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.argmax",
"torch.nn.CrossEntropyLoss",
"torch.save",
"torch.cuda.is_available",
"torch.tensor",
"torch.nn.Embedding",
"torch.sum"
] | 1.3.0 | asears/stanza | f91ca215e175d4f7b202259fe789374db7829395 |
1.5 | import numpy as np
from tqdm import tqdm
import torch
import pdb
from typing import Iterator
from allennlp.data import Instance
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer, PretrainedTransformerIndexer
from allennlp.data.fields import SpanField, ListField, TextField, MetadataField, ArrayField, SequenceLabelField, LabelField
from allennlp.data.tokenizers import Token
from utils import OnlyFixedDatasetLoader, KBConstructor_fromKGemb, FixedNegativesEntityLoader
from overrides import overrides
import random
import transformers
from utils import from_original_sentence2left_mention_right_tokens_before_berttokenized
# SEEDS are FIXED
torch.backends.cudnn.deterministic = True
seed = 777
np.random.seed(seed)
torch.manual_seed(seed)
class FixedDatasetTokenizedReader(DatasetReader):
def __init__(self,args, canonical_and_def_connecttoken, token_indexers=None):
super().__init__(lazy=args.allen_lazyload)
self.args = args
self.max_context_len = args.max_context_len
self.max_canonical_len = args.max_canonical_len
self.max_def_len = args.max_def_len
self.token_indexers = self.token_indexer_returner()
self.berttokenizer = self.berttokenizer_returner()
linking_dataset_loader = OnlyFixedDatasetLoader(args=args)
self.id2line, self.train_mention_id, self.dev_mention_id, self.test_mention_id = linking_dataset_loader.id2line_trn_dev_test_loader()
print('loading KB')
self.kbclass = KBConstructor_fromKGemb(args=self.args)
self.setting_original_KB()
print('original KB loaded')
self.ignored_mention_idxs = self.to_be_ignored_mention_idx_checker()
self.mention_start_token, self.mention_end_token = '[unused1]', '[unused2]'
self.canonical_and_def_connecttoken = canonical_and_def_connecttoken
def setting_original_KB(self):
self.cui2idx, self.idx2cui, self.cui2emb, self.cui2cano, self.cui2def = self.kbclass.return_original_KB()
def currently_stored_KB_dataset_returner(self):
return self.cui2idx, self.idx2cui, self.cui2emb, self.cui2cano, self.cui2def
def huggingfacename_returner(self):
'Return huggingface model name and do_lower_case parameter'
if self.args.bert_name == 'bert-base-uncased':
return 'bert-base-uncased', True
elif self.args.bert_name == 'biobert':
return './biobert_transformers/', False
else:
print('Currently', self.args.bert_name, 'is not supported.')
exit()
def token_indexer_returner(self):
huggingface_name, do_lower_case = self.huggingfacename_returner()
return {'tokens': PretrainedTransformerIndexer(
model_name=huggingface_name,
do_lowercase=do_lower_case)
}
def berttokenizer_returner(self):
if self.args.bert_name == 'bert-base-uncased':
vocab_file = './vocab_file/bert-base-uncased-vocab.txt'
do_lower_case = True
elif self.args.bert_name == 'biobert':
vocab_file = './vocab_file/biobert_v1.1_pubmed_vocab.txt'
do_lower_case = False
else:
print('currently not supported:', self.args.bert_name)
raise NotImplementedError
return transformers.BertTokenizer(vocab_file=vocab_file,
do_lower_case=do_lower_case,
do_basic_tokenize=True,
never_split=['<target>','</target>'])
def tokenizer_custom(self, txt):
target_anchors = ['<target>', '</target>']
original_tokens = txt.split(' ')
new_tokens = list()
for token in original_tokens:
if token in target_anchors:
new_tokens.append(token)
continue
else:
split_to_subwords = self.berttokenizer.tokenize(token)  # token is a single word; it may split into several subwords
if ['[CLS]'] in split_to_subwords:
split_to_subwords.remove('[CLS]')
if ['[SEP]'] in split_to_subwords:
split_to_subwords.remove('[SEP]')
if split_to_subwords == []:
new_tokens.append('[UNK]')
else:
new_tokens += split_to_subwords
return new_tokens
def mention_and_contexttokenizer_followblinkimplementation(self, txt):
'''
Args:
txt: sentence with spaces, including the target anchor tokens
Returns: [[CLS], split_sub0, ..., [mention_start], mention, [mention_end], ..., [SEP]]
'''
mention_start = '<target>'
mention_end = '</target>'
left, mention, right = from_original_sentence2left_mention_right_tokens_before_berttokenized(txt)
new_tokens = list()
new_tokens.append('[CLS]')
if len(left) != 0:
left_tokens = []
for one_token in left:
left_tokens += self.berttokenizer.tokenize(one_token)
new_tokens += left_tokens[:self.args.max_left_context_len]
new_tokens.append(self.mention_start_token)
if len(mention) != 0:
mention_tokens = []
for one_token in mention:
mention_tokens += self.berttokenizer.tokenize(one_token)
new_tokens += mention_tokens[:self.args.max_mention_len]
new_tokens.append(self.mention_end_token)
if len(right) != 0:
right_tokens = []
for one_token in right:
right_tokens += self.berttokenizer.tokenize(one_token)
new_tokens += right_tokens[:self.args.max_right_context_len]
new_tokens.append('[SEP]')
return new_tokens
def find_anchor(self,split_txt,tobefoundtoken):
for i, word in enumerate(split_txt):
if word == tobefoundtoken:
return i
return -1
def left_right_mention_sentence_from_anchorincludedsentence_returner(self, split_txt):
i = self.find_anchor(split_txt=split_txt, tobefoundtoken='<target>') # mention start
j = self.find_anchor(split_txt=split_txt, tobefoundtoken='</target>') # mention end
sfm_mention = split_txt[i+1:j]
raw_sentence_noanchor = [token for token in split_txt if not token in ['<target>', '</target>']]
left_context_include_mention = split_txt[:j]
left_context_include_mention.remove('<target>')
right_context_include_mention = split_txt[i+1:]
right_context_include_mention.remove('</target>')
return raw_sentence_noanchor, sfm_mention, left_context_include_mention, right_context_include_mention
@overrides
def _read(self, train_dev_testflag) -> Iterator[Instance]:
mention_ids = list()
if train_dev_testflag == 'train':
mention_ids += self.train_mention_id
# Because the original data is sorted by pmid documents, we have to shuffle data points for in-batch training.
random.shuffle(mention_ids)
elif train_dev_testflag == 'dev':
mention_ids += self.dev_mention_id
elif train_dev_testflag == 'test':
mention_ids += self.test_mention_id
for idx, mention_uniq_id in tqdm(enumerate(mention_ids)):
if mention_uniq_id in self.ignored_mention_idxs:
continue
if self.args.model_for_training == 'blink_implementation_inbatchencoder':
data = self.linesparser_for_blink_implementation(line=self.id2line[mention_uniq_id], mention_uniq_id=mention_uniq_id)
else:
data = self.lineparser_for_local_mentions(line=self.id2line[mention_uniq_id], mention_uniq_id=mention_uniq_id)
yield self.text_to_instance(data=data)
def lineparser_for_local_mentions(self, line, mention_uniq_id):
'''
Now this function is going to be deprecated,
since we are going to faithfully follow "Zero-shot Entity Linking with Dense Entity Retrieval"
Args:
line:
train_dev_testflag:
mention_uniq_id:
Returns:
'''
gold_cui, gold_type, gold_surface_mention, targetanchor_included_sentence = line.split('\t')
tokenized_context_including_target_anchors = self.tokenizer_custom(txt=targetanchor_included_sentence)
raw_sentence_noanchor, sfm_mention, left_context_include_mention, right_context_include_mention = self.left_right_mention_sentence_from_anchorincludedsentence_returner(
split_txt=tokenized_context_including_target_anchors)
data = {}
data['mention_uniq_id'] = mention_uniq_id
data['gold_ids'] = gold_cui # str
data['gold_id_idx_with_cui2idx'] = int(self.cui2idx[gold_cui])
data['mention_raw'] = gold_surface_mention
data['raw_sentence_without_anchor_str'] = ' '.join(raw_sentence_noanchor)
data['context'] = [Token(word) for word in raw_sentence_noanchor][:self.args.max_context_len]
data['mention_preprocessed'] = [Token(word) for word in sfm_mention][:self.max_context_len]
if len(left_context_include_mention) <= self.max_context_len:
data['left_context_include_mention'] = [Token(word) for word in left_context_include_mention]
else:
data['left_context_include_mention'] = [Token(word) for word in left_context_include_mention][
len(left_context_include_mention) - self.max_context_len:]
data['right_context_include_mention'] = [Token(word) for word in right_context_include_mention][:self.max_context_len]
data['context'].insert(0, Token('[CLS]'))
data['context'].insert(len(data['context']), Token('[SEP]'))
data['mention_preprocessed'].insert(0, Token('[CLS]'))
data['mention_preprocessed'].insert(len(data['mention_preprocessed']), Token('[SEP]'))
data['left_context_include_mention'].insert(0, Token('[CLS]'))
data['left_context_include_mention'].insert(len(data['left_context_include_mention']), Token('[SEP]'))
data['right_context_include_mention'].insert(0, Token('[CLS]'))
data['right_context_include_mention'].insert(len(data['right_context_include_mention']), Token('[SEP]'))
data['gold_cui_cano_and_def_concatenated'] = self.gold_canonical_and_def_concatenated_returner(gold_cui=gold_cui)
return data
def linesparser_for_blink_implementation(self, line, mention_uniq_id):
gold_cui, gold_type, gold_surface_mention, targetanchor_included_sentence = line.split('\t')
gold_cui = gold_cui.replace('UMLS:', '')
tokenized_context_including_target_anchors = self.mention_and_contexttokenizer_followblinkimplementation(txt=targetanchor_included_sentence)
tokenized_context_including_target_anchors = [Token(split_token) for split_token in tokenized_context_including_target_anchors]
data = {}
data['context'] = tokenized_context_including_target_anchors
data['gold_cui_cano_and_def_concatenated'] = self.gold_canonical_and_def_concatenated_returner(gold_cui=gold_cui)
data['gold_cuidx'] = int(self.cui2idx[gold_cui])
data['mention_uniq_id'] = int(mention_uniq_id)
return data
def gold_canonical_and_def_concatenated_returner(self, gold_cui):
canonical = self.tokenizer_custom(txt=self.cui2cano[gold_cui])
definition = self.tokenizer_custom(txt=self.cui2def[gold_cui])
concatenated = ['[CLS]']
concatenated += canonical[:self.max_canonical_len]
concatenated.append(self.canonical_and_def_connecttoken)
concatenated += definition[:self.max_def_len]
concatenated.append('[SEP]')
return [Token(tokenized_word) for tokenized_word in concatenated]
def to_be_ignored_mention_idx_checker(self):
to_be_ignored_mention_idxs = []
all_mention_idxs = list()
all_mention_idxs += self.train_mention_id
all_mention_idxs += self.dev_mention_id
all_mention_idxs += self.test_mention_id
for mention_idx in all_mention_idxs:
gold_cui_or_dui = self.id2line[mention_idx].split('\t')[0].replace('UMLS:', '')
if gold_cui_or_dui not in self.cui2idx:
to_be_ignored_mention_idxs.append(mention_idx)
return to_be_ignored_mention_idxs
@overrides
def text_to_instance(self, data=None) -> Instance:
if self.args.model_for_training == 'blink_implementation_inbatchencoder':
context_field = TextField(data['context'], self.token_indexers)
fields = {"context": context_field}
fields['gold_cui_cano_and_def_concatenated'] = TextField(data['gold_cui_cano_and_def_concatenated'], self.token_indexers)
fields['gold_cuidx'] = ArrayField(np.array(data['gold_cuidx']))
fields['mention_uniq_id'] = ArrayField(np.array(data['mention_uniq_id']))
else:
context_field = TextField(data['context'], self.token_indexers)
fields = {"context": context_field}
surface_mention_field = TextField(data['mention_preprocessed'], self.token_indexers)
fields['left_context_include_mention'] = TextField(data['left_context_include_mention'], self.token_indexers)
fields['right_context_include_mention'] = TextField(data['right_context_include_mention'], self.token_indexers)
fields['mention_processed'] = surface_mention_field
fields['gold_cui_cano_and_def_concatenated'] = TextField(data['gold_cui_cano_and_def_concatenated'], self.token_indexers)
fields['gold_id_for_knn'] = ArrayField(np.array(data['gold_id_idx_with_cui2idx']))
return Instance(fields)
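# --- Hedged usage sketch (editor addition) ---
# How this reader is typically driven: the split name ('train'/'dev'/'test') is
# what _read expects as its file_path argument. The args namespace and the
# connecting token below are assumptions, not values from the original config.
def _fixed_dataset_reader_sketch(args, connect_token):
    reader = FixedDatasetTokenizedReader(args, canonical_and_def_connecttoken=connect_token)
    train_instances = reader.read('train')   # iterable of allennlp Instances
    return train_instances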
'''
For encoding all entities, we need another datasetreader
'''
class AllEntityCanonical_and_Defs_loader(DatasetReader):
def __init__(self, args, idx2cui, cui2cano, cui2def,
textfield_embedder, pretrained_tokenizer, tokenindexer, canonical_and_def_connect_token):
super().__init__(lazy=args.allen_lazyload)
self.args = args
self.idx2cui = idx2cui
self.cui2cano = cui2cano
self.cui2def = cui2def
self.textfield_embedder = textfield_embedder
self.pretrained_tokenizer = pretrained_tokenizer
self.token_indexers = tokenindexer
self.canonical_and_def_connect_token = canonical_and_def_connect_token
@overrides
def _read(self,file_path=None) -> Iterator[Instance]:
for idx, cui in tqdm(self.idx2cui.items()):
if self.args.debug_for_entity_encoder and idx==2100:
break
data = self.cui2data(cui=cui, idx=idx)
yield self.text_to_instance(data=data)
@overrides
def text_to_instance(self, data=None) -> Instance:
cano_and_def_concatenated = TextField(data['cano_and_def_concatenated'], self.token_indexers)
fields = {"cano_and_def_concatenated": cano_and_def_concatenated, 'cui_idx':ArrayField(np.array(data['cui_idx'], dtype='int32'))}
return Instance(fields)
def tokenizer_custom(self, txt):
original_tokens = txt.split(' ')
new_tokens = list()
for token in original_tokens:
split_to_subwords = self.pretrained_tokenizer.tokenize(token)  # token is a single word; it may split into several subwords
if ['[CLS]'] in split_to_subwords:
split_to_subwords.remove('[CLS]')
if ['[SEP]'] in split_to_subwords:
split_to_subwords.remove('[SEP]')
if split_to_subwords == []:
new_tokens.append('[UNK]')
else:
new_tokens += split_to_subwords
return new_tokens
def cui2data(self, cui, idx):
canonical_plus_definition = []
canonical_plus_definition.append('[CLS]')
canonical = self.cui2cano[cui]
canonical_tokens = [split_word for split_word in self.tokenizer_custom(txt=canonical)]
canonical_plus_definition += canonical_tokens[:self.args.max_canonical_len]
canonical_plus_definition.append(self.canonical_and_def_connect_token)
definition = self.cui2def[cui]
definition_tokens = [split_word for split_word in self.tokenizer_custom(txt=definition)]
canonical_plus_definition += definition_tokens[:self.args.max_def_len]
canonical_plus_definition.append('[SEP]')
return {'cano_and_def_concatenated':[ Token(split_word_) for split_word_ in canonical_plus_definition],
'cui_idx': idx}
| [
"torch.manual_seed"
] | 1.5.1 | ruanchaves/Dual-encoder-Entity-Retrieval-with-BERT | ff8c7933afaf0b2c40a7df0250f4b82a5868dc2a |
1.2 | import torch.nn as nn
import torch
import torch.nn.functional as F
import torchvision.models
import os
import utils.network_utils
from utils.pointnet2_utils import PointNetSetAbstraction,PointNetFeaturePropagation
import cuda.emd.emd_module as emd
# Set the path for pretrain weight
os.environ['TORCH_HOME'] = '/media/caig/FECA2C89CA2C406F/sketch3D/pretrain_models'
Conv = nn.Conv2d
def wrapper(func, *args, **kwargs):
class Wrapper(nn.Module):
def __init__(self):
super().__init__()
self.func = func
def forward(self, input):
return self.func(input, *args, **kwargs)
return Wrapper()
class TransformPC(nn.Module):
"""
Transform point cloud to camera coordinate
Input:
xyz: float tensor, (BS,N_PTS,3); input point cloud
values assumed to be in (-1,1)
az: float tensor, (BS); azimuthal angle of camera in radians
el: float tensor, (BS); elevation of camera in radians
Output:
xyz_out: float tensor, (BS,N_PTS,3); output point cloud in camera
co-ordinates
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.n_pts = cfg.CONST.NUM_POINTS
def forward(self, xyz, az, el):
batch_size = xyz.size(0)
cam_xyz = self.world2cam(xyz, az, el, batch_size, N_PTS=self.n_pts)
return cam_xyz
def world2cam(self, xyz, az, el, batch_size, N_PTS=1024):
# y ---> x
rotmat_az=[
[torch.cos(az),torch.sin(az),torch.zeros_like(az)],
[-torch.sin(az),torch.cos(az),torch.zeros_like(az)],
[torch.zeros_like(az),torch.zeros_like(az), torch.ones_like(az)]
]
rotmat_az = [ torch.stack(x) for x in rotmat_az ]
# z ---> x, in dataloader, az = original az - 90 degree, which means here is actually x ----> -z
rotmat_el=[
[torch.cos(el),torch.zeros_like(az), torch.sin(el)],
[torch.zeros_like(az),torch.ones_like(az),torch.zeros_like(az)],
[-torch.sin(el),torch.zeros_like(az), torch.cos(el)]
]
rotmat_el = [ torch.stack(x) for x in rotmat_el ]
rotmat_az = torch.stack(rotmat_az, 0) # [3,3,B]
rotmat_el = torch.stack(rotmat_el, 0) # [3,3,B]
rotmat_az = rotmat_az.permute(2, 0, 1) # [B,3,3]
rotmat_el = rotmat_el.permute(2, 0, 1) # [B,3,3]
rotmat = torch.matmul(rotmat_el, rotmat_az)
# Transformation(t)
# Distance of object from camera - fixed to 2
d = 2.
# Calculate translation params
tx, ty, tz = [0, 0, d]
tr_mat = torch.unsqueeze(torch.tensor([tx, ty, tz]), 0).repeat(batch_size, 1) # [B,3]
tr_mat = torch.unsqueeze(tr_mat,2) # [B,3,1]
tr_mat = tr_mat.permute(0, 2, 1) # [B,1,3]
tr_mat = tr_mat.repeat(1, N_PTS, 1) # [B,N_PTS,3]
tr_mat = utils.network_utils.var_or_cuda(tr_mat) # [B,N_PTS,3]
xyz_out = torch.matmul(rotmat, xyz.permute(0, 2, 1)) - tr_mat.permute(0, 2, 1)
return xyz_out.permute(0, 2, 1)
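# --- Hedged shape-check sketch (editor addition) ---
# Quick sanity check of the camera transform; cfg is assumed to expose
# CONST.NUM_POINTS equal to the number of points N, and tensors are placed on
# GPU when available to match utils.network_utils.var_or_cuda.
def _transform_pc_sketch(cfg, batch_size=2):
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    n = cfg.CONST.NUM_POINTS
    xyz = torch.rand(batch_size, n, 3, device=device) * 2 - 1   # points in (-1, 1)
    az = torch.zeros(batch_size, device=device)                 # azimuth (radians)
    el = torch.zeros(batch_size, device=device)                 # elevation (radians)
    cam_xyz = TransformPC(cfg)(xyz, az, el)                     # [B, N, 3]
    return cam_xyz.shape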
class FeatureProjection(nn.Module):
"""
Project the pointcloud to 2d image and get the corresponding image features at
the project location
Input:
img_feats: multi-scale image features
pc: input point clouds (in camera coordinate) [B, N, 3]
Output:
pc_feats_trans: pointcloud xyz + multi-view image features (by feature projection)
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.concat = wrapper(torch.cat, dim=-1)
def forward(self, img_feats, pc):
pc_feats = []
pc_feats += [self.get_projection(img_feat, pc) for img_feat in img_feats]
pc_feats_trans = self.concat(pc_feats)
return pc_feats_trans
def _project(self, img_feats, xs, ys):
x, y = xs.flatten(), ys.flatten()
idb = torch.arange(img_feats.shape[0], device=img_feats.device)
idb = idb[None].repeat(xs.shape[1], 1).t().flatten().long()
x1, y1 = torch.floor(x), torch.floor(y)
x2, y2 = torch.ceil(x), torch.ceil(y)
q11 = img_feats[idb, :, x1.long(), y1.long()].to(img_feats.device)
q12 = img_feats[idb, :, x1.long(), y2.long()].to(img_feats.device)
q21 = img_feats[idb, :, x2.long(), y1.long()].to(img_feats.device)
q22 = img_feats[idb, :, x2.long(), y2.long()].to(img_feats.device)
weights = ((x2 - x) * (y2 - y)).unsqueeze(1)
q11 *= weights
weights = ((x - x1) * (y2 - y)).unsqueeze(1)
q21 *= weights
weights = ((x2 - x) * (y - y1)).unsqueeze(1)
q12 *= weights
weights = ((x - x1) * (y - y1)).unsqueeze(1)
q22 *= weights
out = q11 + q12 + q21 + q22
return out.view(img_feats.shape[0], -1, img_feats.shape[1])
def get_projection(self, img_feat, pc):
_, _, h_, w_ = tuple(img_feat.shape)
X, Y, Z = pc[..., 0], pc[..., 1], pc[..., 2]
w = (420.*X/abs(Z) + (111.5))
h = (420.*Y/abs(Z) + (111.5))
w = torch.clamp(w, 0., 223.)
h = torch.clamp(h, 0., 223.)
x = w / (223. / (w_ - 1.))
y = h / (223. / (h_ - 1.))
feats = self._project(img_feat, x, y)
return feats
class PointNet2(nn.Module):
"""
Point cloud segmentation (set abstraction + feature propagation) in pointnet++
Input:
xyz: input points position [B, N, 3]
output:
point_feature: per-point features encoded by pointnet [B, 128, N]
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.sa1 = PointNetSetAbstraction(npoint=1024, radius=0.1, nsample=64, in_channel=3, mlp=[64, 64, 128], group_all=False)
self.sa2 = PointNetSetAbstraction(npoint=384, radius=0.2, nsample=64, in_channel=128 + 3, mlp=[128, 128, 256], group_all=False)
self.sa3 = PointNetSetAbstraction(npoint=128, radius=0.4, nsample=64, in_channel=256 + 3, mlp=[256, 256, 512], group_all=False)
self.sa4 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[512, 512, 1024], group_all=True)
self.fp4 = PointNetFeaturePropagation(in_channel=512 + 1024, mlp=[512, 512])
self.fp3 = PointNetFeaturePropagation(in_channel=256 + 512 , mlp=[512, 256])
self.fp2 = PointNetFeaturePropagation(in_channel=128 + 256 , mlp=[256, 128])
self.fp1 = PointNetFeaturePropagation(in_channel=0 + 128 , mlp=[128, 128, 128])
def forward(self, xyz):
xyz = xyz.transpose(2, 1) # [B, C, N]
l0_xyz = xyz
l0_points = None
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
l4_xyz, l4_points = self.sa4(l3_xyz, l3_points)
l3_points = self.fp4(l3_xyz, l4_xyz, l3_points, l4_points)
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, l0_points, l1_points)
return l0_points
class LinearDisplacementNet(nn.Module):
"""
Predict the displacement from pointcloud features and image features
Input:
pc_features: pointcloud features from pointnet2 [B, D, N]
proj_features: image features from feature projection [B, N, D']
noises: noises vector [B, N, n_length]
Output:
displacement: per-point displacement [B, C, N]
"""
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.conv1 = nn.Conv1d(1120, 960, 1)
self.bn1 = nn.BatchNorm1d(960)
self.conv2 = nn.Conv1d(960, 512, 1)
self.bn2 = nn.BatchNorm1d(512)
self.conv3 = nn.Conv1d(512, 256, 1)
self.bn3 = nn.BatchNorm1d(256)
self.conv4 = nn.Conv1d(256, 128, 1)
self.bn4 = nn.BatchNorm1d(128)
self.conv5 = nn.Conv1d(128, 64, 1)
self.bn5 = nn.BatchNorm1d(64)
self.conv6 = nn.Conv1d(64, 3, 1)
def forward(self, transform_xyz, proj_features, pc_features, noises):
noises = noises.transpose(2, 1) # [B, n_length, N]
noises = utils.network_utils.var_or_cuda(noises)
proj_features = proj_features.transpose(2, 1) # [B, D', N]
proj_features = utils.network_utils.var_or_cuda(proj_features)
# concat the img features after each point features
refine_features = torch.cat((pc_features, proj_features, noises), 1) # [B, D+D'+n_length, N]
refine_features = F.relu(self.bn1(self.conv1(refine_features)))
refine_features = F.relu(self.bn2(self.conv2(refine_features)))
refine_features = F.relu(self.bn3(self.conv3(refine_features)))
refine_features = F.relu(self.bn4(self.conv4(refine_features)))
refine_features = F.relu(self.bn5(self.conv5(refine_features)))
displacements = self.conv6(refine_features)
displacements = F.sigmoid(displacements) * self.cfg.REFINE.RANGE_MAX * 2 - self.cfg.REFINE.RANGE_MAX
return displacements
class GRAPHX_REFINE_MODEL(nn.Module):
"""
Refine the point cloud based on the input image
Input:
xyz: point cloud from reconstruction model
Output:
update_pc: updated point cloud
"""
def __init__(self, cfg, in_channels, optimizer=None):
super().__init__()
self.cfg = cfg
# Refinement
self.transform_pc = TransformPC(cfg)
self.feature_projection = FeatureProjection(cfg)
self.pc_encode = PointNet2(cfg)
self.displacement_net = LinearDisplacementNet(cfg)
self.optimizer = None if optimizer is None else optimizer(self.parameters())
# emd loss
self.emd_dist = emd.emdModule()
if torch.cuda.is_available():
self.transform_pc = torch.nn.DataParallel(self.transform_pc, device_ids=cfg.CONST.DEVICE).cuda()
self.feature_projection = torch.nn.DataParallel(self.feature_projection, device_ids=cfg.CONST.DEVICE).cuda()
self.pc_encode = torch.nn.DataParallel(self.pc_encode, device_ids=cfg.CONST.DEVICE).cuda()
self.displacement_net = torch.nn.DataParallel(self.displacement_net, device_ids=cfg.CONST.DEVICE).cuda()
self.emd_dist = torch.nn.DataParallel(self.emd_dist, device_ids=cfg.CONST.DEVICE).cuda()
self.cuda()
def train_step(self, img_features, xyz, gt_pc, view_az, view_el):
'''
Input:
img_features
init pc: [B, N, 3]
gt pc: [B, N, 3]
view_az: [B]
view_el: [B]
Output:
loss
pred_pc: [B, N, 3]
'''
refine_pc = self.refine(img_features, xyz, view_az, view_el)
# compute reconstruction loss
emd_loss, _ = self.emd_dist(
refine_pc, gt_pc, eps=0.005, iters=50
)
rec_loss = torch.sqrt(emd_loss).mean(1).mean()
self.refiner_backward(rec_loss)
rec_loss_np = rec_loss.detach().item()
return rec_loss_np*1000
def valid_step(self, img_features, xyz, gt_pc, view_az, view_el):
# refine the point cloud
refine_pc = self.refine(img_features, xyz, view_az, view_el)
# compute reconstruction loss
emd_loss, _ = self.emd_dist(
refine_pc, gt_pc, eps=0.005, iters=50
)
rec_loss = torch.sqrt(emd_loss).mean(1).mean()
return rec_loss*1000, refine_pc
def refine(self, img_features, xyz, view_az, view_el):
# img_features = self.img_enc(img)
transform_xyz = self.transform_pc(xyz, view_az, view_el)
proj_features = self.feature_projection(img_features, transform_xyz)
pc_features = self.pc_encode(transform_xyz)
noises = torch.normal(mean=0.0, std=1, size=(self.cfg.CONST.BATCH_SIZE, self.cfg.CONST.NUM_POINTS, self.cfg.REFINE.NOISE_LENGTH))
displacements = self.displacement_net(transform_xyz, proj_features, pc_features, noises)
displacements = displacements.transpose(2, 1)
refine_pc = xyz + displacements
return refine_pc
def refiner_backward(self, rec_loss):
self.train(True)
self.optimizer.zero_grad()
rec_loss.backward()
self.optimizer.step()
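# --- Hedged usage sketch (editor addition) ---
# One refinement training step; the optimizer factory, feature maps and tensor
# shapes below describe how the surrounding pipeline is assumed to wire this
# module, not code taken from the original repository.
def _refine_train_step_sketch(cfg, img_features, init_pc, gt_pc, view_az, view_el):
    from functools import partial
    optimizer = partial(torch.optim.Adam, lr=1e-4)
    refiner = GRAPHX_REFINE_MODEL(cfg, in_channels=3, optimizer=optimizer)
    # img_features: list of multi-scale feature maps [B, C_i, H_i, W_i]
    # init_pc / gt_pc: [B, N, 3]; view_az / view_el: [B] (radians)
    rec_loss = refiner.train_step(img_features, init_pc, gt_pc, view_az, view_el)
    return rec_loss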
| [
"torch.cat",
"torch.stack",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.sqrt",
"torch.nn.Conv1d",
"torch.normal",
"torch.unsqueeze",
"torch.ceil",
"torch.tensor",
"torch.zeros_like",
"torch.cos",
"torch.nn.functional.sigmoid",
"torch.clamp",
"torch.matmul",
"torch.arange",
"torch.sin",
"torch.nn.BatchNorm1d",
"torch.ones_like",
"torch.floor"
] | 1.2.0 | brian220/Sketch2PointCloud | 17e8657ffc6605804ab4f1da89f446ea4d37665c |
1.7 | # modify from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501
import os
import torch
from torch.autograd import Function
from torch.nn import functional as F
BASICSR_JIT = os.getenv('BASICSR_JIT')
if BASICSR_JIT == 'True':
from torch.utils.cpp_extension import load
module_path = os.path.dirname(__file__)
upfirdn2d_ext = load(
'upfirdn2d',
sources=[
os.path.join(module_path, 'src', 'upfirdn2d.cpp'),
os.path.join(module_path, 'src', 'upfirdn2d_kernel.cu'),
],
)
else:
try:
from . import upfirdn2d_ext
except ImportError:
pass
# avoid annoying print output
# print(f'Cannot import deform_conv_ext. Error: {error}. You may need to: \n '
# '1. compile with BASICSR_EXT=True. or\n '
# '2. set BASICSR_JIT=True during running')
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_ext.upfirdn2d(
grad_output,
grad_kernel,
down_x,
down_y,
up_x,
up_y,
g_pad_x0,
g_pad_x1,
g_pad_y0,
g_pad_y1,
)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
gradgrad_out = upfirdn2d_ext.upfirdn2d(
gradgrad_input,
kernel,
ctx.up_x,
ctx.up_y,
ctx.down_x,
ctx.down_y,
ctx.pad_x0,
ctx.pad_x1,
ctx.pad_y0,
ctx.pad_y1,
)
# gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0],
# ctx.out_size[1], ctx.in_size[3])
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = (out_h, out_w)
ctx.up = (up_x, up_y)
ctx.down = (down_x, down_y)
ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
out = upfirdn2d_ext.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1)
# out = out.view(major, out_h, out_w, minor)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(
grad_output,
kernel,
grad_kernel,
ctx.up,
ctx.down,
ctx.pad,
ctx.g_pad,
ctx.in_size,
ctx.out_size,
)
return grad_input, None, None, None, None
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if input.device.type == 'cpu':
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1])
else:
out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1]))
return out
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])
out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :, ]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(
-1,
minor,
in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
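# --- Hedged usage sketch (editor addition) ---
# upfirdn2d with up=2, down=1 upsamples by 2 and filters with `kernel`; the
# 4-tap binomial kernel and the padding below are illustrative choices, not
# values taken from the original repository. On CPU this falls back to
# upfirdn2d_native, so the compiled extension is not required.
def _upfirdn2d_sketch():
    x = torch.randn(1, 3, 16, 16)
    k = torch.tensor([1., 3., 3., 1.])
    kernel = k[:, None] * k[None, :]
    kernel = kernel / kernel.sum()
    out = upfirdn2d(x, kernel, up=2, down=1, pad=(2, 1))   # -> [1, 3, 32, 32]
    return out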
| [
"torch.nn.functional.pad",
"torch.nn.functional.conv2d",
"torch.flip"
] | 1.7 | marcelodiaz558/BasicSR | 1d5138ed567e966965fd1540838d27e6f5082b70 |
1.9 | from torch import nn
import torch
class LogisticRegression(nn.Module):
def __init__(self,
theta_params: int):
super(LogisticRegression, self).__init__()
self.__linear = nn.Linear(theta_params, 1)
self.__sigmoid_layer = nn.Sigmoid()
def forward(self,
x_input: torch.tensor) -> torch.tensor:
return self.__sigmoid_layer(self.__linear(x_input))
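# --- Hedged usage sketch (editor addition) ---
# Minimal binary-classification training loop; the data shapes and
# hyper-parameters are illustrative assumptions. y_train is expected as a
# float tensor of shape (n_samples, 1) with values in {0, 1}.
def _train_logistic_regression_sketch(x_train, y_train, theta_params, epochs=100, lr=0.1):
    model = LogisticRegression(theta_params)
    criterion = nn.BCELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    for _ in range(epochs):
        optimizer.zero_grad()
        loss = criterion(model(x_train), y_train)
        loss.backward()
        optimizer.step()
    return model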
| [
"torch.nn.Linear",
"torch.nn.Sigmoid"
] | 1.9.1 | govindansriram/CobraML | d231d2e446df7e7860071f5d7cfa1e31afa99c6b |
1.7 | from torch.utils.data import Dataset
import numpy as np
import torch
from . import functions
class TokensDataset(Dataset):
def __init__(self, X, Y):
self.X = self.encode_x(X)
self.y = Y
@staticmethod
def encode_x(x: list) -> list:
max_len = len(max(x, key=lambda i: len(i)))
encoded = []
for i in x:
encoded.append(np.array(functions.encode(i, max_len)))
return encoded
@staticmethod
def collate_fn(objs: list) -> (torch.LongTensor, torch.Tensor):
data = ([i[0] for i in objs])
labels = ([i[1] for i in objs])
data = torch.LongTensor(data)
labels = torch.tensor(labels)
return data, labels
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
return self.X[idx], self.y[idx]
| [
"torch.LongTensor",
"torch.tensor"
] | 1.7.0 | GroupLe/grouple-face-tagger | 5fd87c074dc50a5fc341e9f30774094a1616a87f |
1.7 | import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from utils import utils_sr
import torch
from argparse import ArgumentParser
from utils.utils_restoration import rgb2y, psnr, array2tensor, tensor2array
import sys
from matplotlib.ticker import MaxNLocator
class PnP_restoration():
def __init__(self, hparams):
self.hparams = hparams
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.initialize_cuda_denoiser()
def initialize_cuda_denoiser(self):
'''
Initialize the denoiser model with the given pretrained ckpt
'''
sys.path.append('../GS_denoising/')
from lightning_GSDRUNet import GradMatch
parser2 = ArgumentParser(prog='utils_restoration.py')
parser2 = GradMatch.add_model_specific_args(parser2)
parser2 = GradMatch.add_optim_specific_args(parser2)
hparams = parser2.parse_known_args()[0]
hparams.act_mode = self.hparams.act_mode_denoiser
self.denoiser_model = GradMatch(hparams)
checkpoint = torch.load(self.hparams.pretrained_checkpoint, map_location=self.device)
self.denoiser_model.load_state_dict(checkpoint['state_dict'])
self.denoiser_model.eval()
for i, v in self.denoiser_model.named_parameters():
v.requires_grad = False
self.denoiser_model = self.denoiser_model.to(self.device)
if self.hparams.precision == 'double' :
if self.denoiser_model is not None:
self.denoiser_model.double()
def initialize_prox(self, img, degradation):
'''
pre-computations for future prox evaluations
:param img: degraded image
:param degradation: 2D blur kernel for deblurring and SR, mask for inpainting
'''
if self.hparams.degradation_mode == 'deblurring':
self.k = degradation
self.k_tensor = array2tensor(np.expand_dims(self.k, 2)).double().to(self.device)
self.FB, self.FBC, self.F2B, self.FBFy = utils_sr.pre_calculate(img, self.k_tensor, 1)
elif self.hparams.degradation_mode == 'SR':
self.k = degradation
self.k_tensor = array2tensor(np.expand_dims(self.k, 2)).double().to(self.device)
self.FB, self.FBC, self.F2B, self.FBFy = utils_sr.pre_calculate(img, self.k_tensor, self.hparams.sf)
elif self.hparams.degradation_mode == 'inpainting':
self.M = array2tensor(degradation).double().to(self.device)
self.My = self.M*img
else:
print('degradation mode not treated')
def calculate_prox(self, img):
'''
Calculation of the proximal mapping of the data term f
:param img: input for the prox
:return: prox_f(img)
'''
if self.hparams.degradation_mode == 'deblurring':
rho = torch.tensor([1/self.tau]).double().repeat(1, 1, 1, 1).to(self.device)
px = utils_sr.data_solution(img.double(), self.FB, self.FBC, self.F2B, self.FBFy, rho, 1)
elif self.hparams.degradation_mode == 'SR':
rho = torch.tensor([1 / self.tau]).double().repeat(1, 1, 1, 1).to(self.device)
px = utils_sr.data_solution(img.double(), self.FB, self.FBC, self.F2B, self.FBFy, rho, self.hparams.sf)
elif self.hparams.degradation_mode == 'inpainting':
if self.hparams.noise_level_img > 1e-2:
px = (self.tau*self.My + img)/(self.tau*self.M+1)
else :
px = self.My + (1-self.M)*img
else:
print('degradation mode not treated')
return px
def calculate_F(self,x,s,img):
'''
Calculation of the objective function value f + lamb*s
:param x: Point where to evaluate F
:param s: Precomputed regularization function value
:param img: Degraded image
:return: F(x)
'''
if self.hparams.degradation_mode == 'deblurring':
deg_x = utils_sr.imfilter(x.double(),self.k_tensor[0].double().flip(1).flip(2).expand(3,-1,-1,-1))
F = 0.5 * torch.norm(img - deg_x, p=2) ** 2 + self.hparams.lamb * s
elif self.hparams.degradation_mode == 'SR':
deg_x = utils_sr.imfilter(x.double(), self.k_tensor[0].double().flip(1).flip(2).expand(3, -1, -1, -1))
deg_x = deg_x[...,0::self.hparams.sf, 0::self.hparams.sf]
F = 0.5 * torch.norm(img - deg_x, p=2) ** 2 + self.hparams.lamb * s
elif self.hparams.degradation_mode == 'inpainting':
deg_x = self.M*x.double()
F = 0.5*torch.norm(img - deg_x, p=2) ** 2 + self.hparams.lamb * s
else :
print('degradation not implemented')
return F.item()
def restore(self, img, clean_img, degradation,extract_results=False):
'''
Compute GS-PnP restoration algorithm
:param img: Degraded image
:param clean_img: ground-truth clean image
:param degradation: 2D blur kernel for deblurring and SR, mask for inpainting
:param extract_results: Extract information for subsequent image or curve saving
'''
if extract_results:
z_list, x_list, Dx_list, psnr_tab, s_list, Ds_list, F_list = [], [], [], [], [], [], []
# initialize parameters
if self.hparams.tau is not None:
self.tau = self.hparams.tau
else:
self.tau = 1 / self.hparams.lamb
i = 0 # iteration counter
img_tensor = array2tensor(img).to(self.device) # for GPU computations (if GPU available)
self.initialize_prox(img_tensor, degradation)  # prox pre-computations that can be done outside of the loop
# Initialization of the algorithm
if self.hparams.degradation_mode == 'SR' :
x0 = cv2.resize(img, (img.shape[1] * self.hparams.sf, img.shape[0] * self.hparams.sf),interpolation=cv2.INTER_CUBIC)
x0 = utils_sr.shift_pixel(x0, self.hparams.sf)
x0 = array2tensor(x0).to(self.device)
else:
x0 = img_tensor
x0 = self.calculate_prox(x0)
if extract_results: # extract np images and PSNR values
out_x = tensor2array(x0.cpu())
current_x_psnr = psnr(clean_img, out_x)
if self.hparams.print_each_step:
print('current x PSNR : ', current_x_psnr)
psnr_tab.append(current_x_psnr)
x_list.append(out_x)
x = x0
diff_F = 1
F_old = 1
self.relative_diff_F_min = self.hparams.relative_diff_F_min
while i < self.hparams.maxitr and abs(diff_F)/F_old > self.relative_diff_F_min:
if self.hparams.inpainting_init :
if i < self.hparams.n_init:
self.sigma_denoiser = 50
self.relative_diff_F_min = 0
else :
self.sigma_denoiser = self.hparams.sigma_denoiser
self.relative_diff_F_min = self.hparams.relative_diff_F_min
else :
self.sigma_denoiser = self.hparams.sigma_denoiser
x_old = x
# Denoising of x_old and calculation of F_old
Ds, f = self.denoiser_model.calculate_grad(x_old, self.sigma_denoiser / 255.)
Ds = Ds.detach()
f = f.detach()
Dx = x_old - self.denoiser_model.hparams.weight_Ds * Ds
s_old = 0.5 * (torch.norm(x_old.double() - f.double(), p=2) ** 2)
F_old = self.calculate_F(x_old, s_old, img_tensor)
backtracking_check = False
while not backtracking_check:
# Gradient step
z = (1 - self.hparams.lamb * self.tau) * x_old + self.hparams.lamb * self.tau * Dx
# Proximal step
x = self.calculate_prox(z)
# Calculation of Fnew
f = self.denoiser_model.calculate_grad(x, self.sigma_denoiser / 255.)[1]
f = f.detach()
s = 0.5 * (torch.norm(x.double() - f.double(), p=2) ** 2)
F_new = self.calculate_F(x,s,img_tensor)
# Backtracking
diff_x = (torch.norm(x - x_old, p=2) ** 2).item()
diff_F = F_old - F_new
if self.hparams.degradation_mode == 'inpainting':
diff_F = 1
F_old = 1
if self.hparams.use_backtracking and diff_F < (self.hparams.gamma / self.tau) * diff_x and abs(diff_F)/F_old > self.relative_diff_F_min:
backtracking_check = False
self.tau = self.hparams.eta_tau * self.tau
x = x_old
else:
backtracking_check = True
# Logging
if extract_results:
out_z = tensor2array(z.cpu())
out_x = tensor2array(x.cpu())
current_z_psnr = psnr(clean_img, out_z)
current_x_psnr = psnr(clean_img, out_x)
if self.hparams.print_each_step:
print('iteration : ', i)
print('current z PSNR : ', current_z_psnr)
print('current x PSNR : ', current_x_psnr)
x_list.append(out_x)
z_list.append(out_z)
Dx_list.append(tensor2array(Dx.cpu()))
Ds_list.append(torch.norm(Ds).cpu().item())
s_list.append(s.cpu().item())
F_list.append(F_new)
psnr_tab.append(current_x_psnr)
i += 1 # next iteration
# post-processing gradient step
if extract_results:
Ds, f = self.denoiser_model.calculate_grad(x, self.sigma_denoiser / 255.)
Ds = Ds.detach()
f = f.detach()
Dx = x - self.denoiser_model.hparams.weight_Ds * Ds.detach()
s = 0.5 * (torch.norm(x.double() - f.double(), p=2) ** 2)
else:
Ds, _ = self.denoiser_model.calculate_grad(x, self.sigma_denoiser / 255.)
Ds = Ds.detach()
Dx = x - self.denoiser_model.hparams.weight_Ds * Ds
z = (1 - self.hparams.lamb * self.tau) * x + self.hparams.lamb * self.tau * Dx
if self.hparams.degradation_mode == 'inpainting':
output_img = tensor2array(x.cpu())
else :
output_img = tensor2array(z.cpu())
output_psnr = psnr(clean_img, output_img)
output_psnrY = psnr(rgb2y(clean_img), rgb2y(output_img))
if extract_results:
if self.hparams.print_each_step:
print('current z PSNR : ', output_psnr)
z_list.append(tensor2array(z.cpu()))
Dx_list.append(tensor2array(Dx.cpu()))
Ds_list.append(torch.norm(Ds).cpu().item())
s_list.append(s.cpu().item())
return output_img, output_psnr, output_psnrY, np.array(x_list), np.array(z_list), np.array(Dx_list), np.array(psnr_tab), np.array(Ds_list), np.array(s_list), np.array(F_list)
else:
return output_img, output_psnr, output_psnrY
def initialize_curves(self):
self.rprox = []
self.prox = []
self.conv = []
self.lip_algo = []
self.lip_D = []
self.PSNR = []
self.s = []
self.Ds = []
self.F = []
def update_curves(self, x_list, z_list, Dx_list, psnr_tab, Ds_list, s_list, F_list):
prox_list = x_list
self.F.append(F_list)
self.s.append(s_list)
self.Ds.append(Ds_list)
self.prox.append(np.sqrt(np.array([np.sum(np.abs(prox_list[i + 1] - prox_list[i]) ** 2) for i in range(len(x_list[:-1]))]) / np.array([np.sum(np.abs(z_list[i + 1] - z_list[i]) ** 2) for i in range(len(z_list[:-1]))])))
rprox_list = 2 * prox_list - z_list
self.rprox.append(np.sqrt(np.array([np.sum(np.abs(rprox_list[i + 1] - rprox_list[i]) ** 2) for i in range(len(rprox_list[:-1]))]) / np.array([np.sum(np.abs(z_list[i + 1] - z_list[i]) ** 2) for i in range(len(rprox_list[:-1]))])))
self.conv.append(np.array([np.sum(np.abs(x_list[k + 1] - x_list[k]) ** 2) for k in range(len(x_list) - 1)]) / np.sum(np.abs(x_list[0]) ** 2))
self.lip_algo.append(np.sqrt(np.array([np.sum(np.abs(x_list[k + 1] - x_list[k]) ** 2) for k in range(1, len(x_list) - 1)]) / np.array([np.sum(np.abs(x_list[k] - x_list[k - 1]) ** 2) for k in range(1, len(x_list[:-1]))])))
self.lip_D.append(np.sqrt(np.array([np.sum(np.abs(Dx_list[i + 1] - Dx_list[i]) ** 2) for i in range(len(Dx_list) - 1)]) / np.array([np.sum(np.abs(x_list[i + 1] - x_list[i]) ** 2) for i in range(len(x_list) - 1)])))
self.PSNR.append(psnr_tab)
def save_curves(self, save_path):
import matplotlib
matplotlib.rcParams.update({'font.size': 15})
plt.figure(1)
fig, ax = plt.subplots()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for i in range(len(self.PSNR)):
plt.plot(self.PSNR[i], '*', label='im_' + str(i))
plt.legend()
plt.grid()
plt.savefig(os.path.join(save_path, 'PSNR.png'))
plt.figure(2)
fig, ax = plt.subplots()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for i in range(len(self.F)):
plt.plot(self.F[i], '-o', markersize=10)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.savefig(os.path.join(save_path, 'F.png'))
plt.figure(3)
fig, ax = plt.subplots()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for i in range(len(self.conv)):
plt.plot(self.conv[i], '-o', markersize=10)
plt.semilogy()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.savefig(os.path.join(save_path, 'conv_log.png'), bbox_inches="tight")
self.conv2 = [[np.min(self.conv[i][:k]) for k in range(1, len(self.conv[i]))] for i in range(len(self.conv))]
conv_rate = [self.conv2[i][0]*np.array([(1/k) for k in range(1,len(self.conv2[i]))]) for i in range(len(self.conv2))]
plt.figure(4)
fig, ax = plt.subplots()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for i in range(len(self.conv2)):
plt.plot(self.conv2[i], '-o', markersize=10, label='GS-PnP')
plt.plot(conv_rate[i], '--', color='red', label=r'$\mathcal{O}(\frac{1}{K})$')
plt.semilogy()
plt.legend()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.savefig(os.path.join(save_path, 'conv_log2.png'), bbox_inches="tight")
plt.figure(5)
fig, ax = plt.subplots()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
for i in range(len(self.lip_algo)):
plt.plot(self.lip_algo[i], '-o', label='im_' + str(i))
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.grid()
plt.savefig(os.path.join(save_path, 'lip_algo.png'))
plt.figure(6)
for i in range(len(self.lip_D)):
plt.plot(self.lip_D[i], '-o', label='im_' + str(i))
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.grid()
plt.savefig(os.path.join(save_path, 'lip_D.png'))
def add_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--denoiser_name', type=str, default='GS-DRUNet')
parser.add_argument('--dataset_path', type=str, default='../datasets')
parser.add_argument('--pretrained_checkpoint', type=str,default='../GS_denoising/ckpts/GSDRUNet.ckpt')
parser.add_argument('--PnP_algo', type=str, default='HQS')
parser.add_argument('--dataset_name', type=str, default='CBSD10')
parser.add_argument('--sigma_denoiser', type=float)
parser.add_argument('--noise_level_img', type=float, default=2.55)
parser.add_argument('--maxitr', type=int, default=400)
parser.add_argument('--lamb', type=float, default=0.1)
parser.add_argument('--tau', type=float)
parser.add_argument('--n_images', type=int, default=68)
parser.add_argument('--weight_Ds', type=float, default=1.)
parser.add_argument('--eta_tau', type=float, default=0.9)
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--no_use_backtracking', dest='use_backtracking', action='store_false')
parser.set_defaults(use_backtracking=True)
parser.add_argument('--relative_diff_F_min', type=float, default=1e-6)
parser.add_argument('--inpainting_init', dest='inpainting_init', action='store_true')
parser.set_defaults(inpainting_init=False)
parser.add_argument('--precision', type=str, default='simple')
parser.add_argument('--n_init', type=int, default=10)
parser.add_argument('--patch_size', type=int, default=256)
parser.add_argument('--extract_curves', dest='extract_curves', action='store_true')
parser.set_defaults(extract_curves=False)
parser.add_argument('--extract_images', dest='extract_images', action='store_true')
parser.set_defaults(extract_images=False)
parser.add_argument('--print_each_step', dest='print_each_step', action='store_true')
parser.set_defaults(print_each_step=False)
parser.add_argument('--act_mode_denoiser', type=str, default='E')
return parser
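# --- Hedged usage sketch (editor addition) ---
# Rough deblurring driver showing the intended call order
# (initialize_curves -> restore -> update_curves); the hparams namespace,
# the image pair and the blur kernel are placeholders, not values from the
# original experiments.
def _gspnp_deblurring_sketch(hparams, degraded_img, clean_img, blur_kernel):
    pnp = PnP_restoration(hparams)
    pnp.initialize_curves()
    out_img, out_psnr, out_psnrY, *curves = pnp.restore(
        degraded_img, clean_img, blur_kernel, extract_results=True)
    pnp.update_curves(*curves)
    return out_img, out_psnr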
| [
"torch.norm",
"torch.cuda.is_available",
"torch.tensor",
"torch.load"
] | 1.7.1 | samuro95/GSPnP | 1aaabf24d2912135da0bdb89cad1cd0846f9649e |
1.4 | # coding=utf-8
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for multimodal multiclass prediction on MM-IMDB dataset."""
import argparse
import glob
import json
import logging
import os
import random
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import f1_score
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertModel,
AlbertTokenizer,
BertConfig,
BertModel,
BertTokenizer,
DistilBertConfig,
DistilBertModel,
DistilBertTokenizer,
MMBTConfig,
MMBTForClassification,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
XLMConfig,
XLMModel,
XLMTokenizer,
XLNetConfig,
XLNetModel,
XLNetTokenizer,
get_linear_schedule_with_warmup,
)
from utils_mmimdb import ImageEncoder, JsonlDataset, collate_fn, get_image_transforms, get_mmimdb_labels
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (BertConfig, XLNetConfig, XLMConfig, RobertaConfig, DistilBertConfig)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertModel, BertTokenizer),
"xlnet": (XLNetConfig, XLNetModel, XLNetTokenizer),
"xlm": (XLMConfig, XLMModel, XLMTokenizer),
"roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
"distilbert": (DistilBertConfig, DistilBertModel, DistilBertTokenizer),
"albert": (AlbertConfig, AlbertModel, AlbertTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, criterion):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
collate_fn=collate_fn,
num_workers=args.num_workers,
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
best_f1, n_no_improve = 0, 0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
set_seed(args)  # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
labels = batch[5]
inputs = {
"input_ids": batch[0],
"input_modal": batch[2],
"attention_mask": batch[1],
"modal_start_tokens": batch[3],
"modal_end_tokens": batch[4],
}
outputs = model(**inputs)
logits = outputs[0] # model outputs are always tuple in transformers (see doc)
loss = criterion(logits, labels)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer, criterion)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["loss"] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{"step": global_step}}))
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
torch.save(model_to_save.state_dict(), os.path.join(output_dir, WEIGHTS_NAME))
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank == -1:
results = evaluate(args, model, tokenizer, criterion)
if results["micro_f1"] > best_f1:
best_f1 = results["micro_f1"]
n_no_improve = 0
else:
n_no_improve += 1
if n_no_improve > args.patience:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, criterion, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
eval_dataset = load_examples(args, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate_fn
)
# multi-gpu eval
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
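    # Multi-label evaluation: sigmoid outputs are thresholded at 0.5 and accumulated across batches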
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
batch = tuple(t.to(args.device) for t in batch)
labels = batch[5]
inputs = {
"input_ids": batch[0],
"input_modal": batch[2],
"attention_mask": batch[1],
"modal_start_tokens": batch[3],
"modal_end_tokens": batch[4],
}
outputs = model(**inputs)
logits = outputs[0] # model outputs are always tuple in transformers (see doc)
tmp_eval_loss = criterion(logits, labels)
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = torch.sigmoid(logits).detach().cpu().numpy() > 0.5
out_label_ids = labels.detach().cpu().numpy()
else:
preds = np.append(preds, torch.sigmoid(logits).detach().cpu().numpy() > 0.5, axis=0)
out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
result = {
"loss": eval_loss,
"macro_f1": f1_score(out_label_ids, preds, average="macro"),
"micro_f1": f1_score(out_label_ids, preds, average="micro"),
}
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return result
def load_examples(args, tokenizer, evaluate=False):
path = os.path.join(args.data_dir, "dev.jsonl" if evaluate else "train.jsonl")
transforms = get_image_transforms()
labels = get_mmimdb_labels()
dataset = JsonlDataset(path, tokenizer, transforms, labels, args.max_seq_length - args.num_image_embeds - 2)
return dataset
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .jsonl files for MMIMDB.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--num_image_embeds", default=1, type=int, help="Number of Image Embeddings from the Image Encoder"
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument("--patience", default=5, type=int, help="Patience for Early Stopping.")
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--num_workers", type=int, default=8, help="number of worker threads for dataloading")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
# Setup model
labels = get_mmimdb_labels()
num_labels = len(labels)
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
transformer_config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
transformer = model_class.from_pretrained(
args.model_name_or_path, config=transformer_config, cache_dir=args.cache_dir if args.cache_dir else None
)
img_encoder = ImageEncoder(args)
config = MMBTConfig(transformer_config, num_labels=num_labels)
model = MMBTForClassification(config, transformer, img_encoder)
if args.local_rank == 0:
        torch.distributed.barrier()  # End of barrier: rank 0 has downloaded the model & vocab, so the other processes can now load them from cache
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_examples(args, tokenizer, evaluate=False)
label_frequences = train_dataset.get_label_frequencies()
label_frequences = [label_frequences[l] for l in labels]
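        # Weight positive classes by inverse label frequency to counter class imbalance in the multi-label targets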
label_weights = (
torch.tensor(label_frequences, device=args.device, dtype=torch.float) / len(train_dataset)
) ** -1
criterion = nn.BCEWithLogitsLoss(pos_weight=label_weights)
global_step, tr_loss = train(args, train_dataset, model, tokenizer, criterion)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
torch.save(model_to_save.state_dict(), os.path.join(args.output_dir, WEIGHTS_NAME))
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = MMBTForClassification(config, transformer, img_encoder)
model.load_state_dict(torch.load(os.path.join(args.output_dir, WEIGHTS_NAME)))
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
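        # Note: criterion is only constructed in the --do_train branch above; an evaluation-only run would need to build it here as well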
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = MMBTForClassification(config, transformer, img_encoder)
model.load_state_dict(torch.load(checkpoint))
model.to(args.device)
result = evaluate(args, model, tokenizer, criterion, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
| [
"torch.distributed.get_world_size",
"torch.utils.data.RandomSampler",
"torch.cuda.is_available",
"torch.nn.BCEWithLogitsLoss",
"torch.load",
"torch.nn.DataParallel",
"torch.sigmoid",
"torch.distributed.init_process_group",
"torch.manual_seed",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.distributed.barrier",
"torch.no_grad",
"torch.utils.data.distributed.DistributedSampler"
] | 1.4.0 | 12190143/transformers | ab90353f1abfd15f8d21f99395658d060679a08c |
1.4 | # coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLM model.
"""
import itertools
import logging
import math
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from .activations import gelu
from .configuration_xlm import XLMConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, SequenceSummary, SQuADHead, prune_linear_layer
logger = logging.getLogger(__name__)
XLM_PRETRAINED_MODEL_ARCHIVE_MAP = {
"xlm-mlm-en-2048": "https://cdn.huggingface.co/xlm-mlm-en-2048-pytorch_model.bin",
"xlm-mlm-ende-1024": "https://cdn.huggingface.co/xlm-mlm-ende-1024-pytorch_model.bin",
"xlm-mlm-enfr-1024": "https://cdn.huggingface.co/xlm-mlm-enfr-1024-pytorch_model.bin",
"xlm-mlm-enro-1024": "https://cdn.huggingface.co/xlm-mlm-enro-1024-pytorch_model.bin",
"xlm-mlm-tlm-xnli15-1024": "https://cdn.huggingface.co/xlm-mlm-tlm-xnli15-1024-pytorch_model.bin",
"xlm-mlm-xnli15-1024": "https://cdn.huggingface.co/xlm-mlm-xnli15-1024-pytorch_model.bin",
"xlm-clm-enfr-1024": "https://cdn.huggingface.co/xlm-clm-enfr-1024-pytorch_model.bin",
"xlm-clm-ende-1024": "https://cdn.huggingface.co/xlm-clm-ende-1024-pytorch_model.bin",
"xlm-mlm-17-1280": "https://cdn.huggingface.co/xlm-mlm-17-1280-pytorch_model.bin",
"xlm-mlm-100-1280": "https://cdn.huggingface.co/xlm-mlm-100-1280-pytorch_model.bin",
}
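# Fixed (non-learned) sinusoidal position embeddings, written in-place into the given embedding weight tensor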
def create_sinusoidal_embeddings(n_pos, dim, out):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
def get_masks(slen, lengths, causal, padding_mask=None):
"""
Generate hidden states mask, and optionally an attention mask.
"""
alen = torch.arange(slen, dtype=torch.long, device=lengths.device)
if padding_mask is not None:
mask = padding_mask
else:
assert lengths.max().item() <= slen
mask = alen < lengths[:, None]
# attention mask is the same as mask, or triangular inferior attention (causal)
bs = lengths.size(0)
if causal:
attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]
else:
attn_mask = mask
# sanity check
assert mask.size() == (bs, slen)
assert causal is False or attn_mask.size() == (bs, slen, slen)
return mask, attn_mask
class MultiHeadAttention(nn.Module):
NEW_ID = itertools.count()
def __init__(self, n_heads, dim, config):
super().__init__()
self.layer_id = next(MultiHeadAttention.NEW_ID)
self.output_attentions = config.output_attentions
self.dim = dim
self.n_heads = n_heads
self.dropout = config.attention_dropout
assert self.dim % self.n_heads == 0
self.q_lin = nn.Linear(dim, dim)
self.k_lin = nn.Linear(dim, dim)
self.v_lin = nn.Linear(dim, dim)
self.out_lin = nn.Linear(dim, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
attention_head_size = self.dim // self.n_heads
if len(heads) == 0:
return
mask = torch.ones(self.n_heads, attention_head_size)
heads = set(heads) - self.pruned_heads
for head in heads:
head -= sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.q_lin = prune_linear_layer(self.q_lin, index)
self.k_lin = prune_linear_layer(self.k_lin, index)
self.v_lin = prune_linear_layer(self.v_lin, index)
self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.dim = attention_head_size * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input, mask, kv=None, cache=None, head_mask=None):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
bs, qlen, dim = input.size()
if kv is None:
klen = qlen if cache is None else cache["slen"] + qlen
else:
klen = kv.size(1)
# assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)
n_heads = self.n_heads
dim_per_head = self.dim // n_heads
mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
def shape(x):
""" projection """
return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
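        # Scaled dot-product attention: project q/k/v, optionally reuse cached keys/values, score, mask, softmax, dropout, and recombine heads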
q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
elif cache is None or self.layer_id not in cache:
k = v = kv
k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
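        # Incremental decoding: when a cache is provided, append new self-attention keys/values to (or reuse) those from previous steps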
if cache is not None:
if self.layer_id in cache:
if kv is None:
k_, v_ = cache[self.layer_id]
k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
else:
k, v = cache[self.layer_id]
cache[self.layer_id] = (k, v)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)
mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)
scores.masked_fill_(mask, -float("inf")) # (bs, n_heads, qlen, klen)
weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, qlen, dim)
outputs = (self.out_lin(context),)
if self.output_attentions:
outputs = outputs + (weights,)
return outputs
class TransformerFFN(nn.Module):
def __init__(self, in_dim, dim_hidden, out_dim, config):
super().__init__()
self.dropout = config.dropout
self.lin1 = nn.Linear(in_dim, dim_hidden)
self.lin2 = nn.Linear(dim_hidden, out_dim)
self.act = gelu if config.gelu_activation else F.relu
def forward(self, input):
x = self.lin1(input)
x = self.act(x)
x = self.lin2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
return x
class XLMPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = XLMConfig
pretrained_model_archive_map = XLM_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = None
base_model_prefix = "transformer"
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
@property
def dummy_inputs(self):
inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
if self.config.use_lang_emb and self.config.n_langs > 1:
langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
else:
langs_list = None
return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list}
def _init_weights(self, module):
""" Initialize the weights. """
if isinstance(module, nn.Embedding):
if self.config is not None and self.config.embed_init_std is not None:
nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)
if isinstance(module, nn.Linear):
if self.config is not None and self.config.init_std is not None:
nn.init.normal_(module.weight, mean=0, std=self.config.init_std)
if hasattr(module, "bias") and module.bias is not None:
nn.init.constant_(module.bias, 0.0)
if isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
XLM_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.XLMConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XLM_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
langs (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
A parallel sequence of tokens to be used to indicate the language of each token in the input.
Indices are languages ids which can be obtained from the language names by using two conversion mappings
provided in the configuration of the model (only provided for multilingual models).
More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and
the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str).
See usage examples detailed in the `multilingual documentation <https://huggingface.co/transformers/multilingual.html>`__.
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
lengths (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Length of each sentence that can be used to avoid performing attention on padding token indices.
            You can also use `attention_mask` for the same result (see above), kept here for compatibility.
Indices selected in ``[0, ..., input_ids.size(-1)]``:
cache (:obj:`Dict[str, torch.FloatTensor]`, `optional`, defaults to :obj:`None`):
dictionary with ``torch.FloatTensor`` that contains pre-computed
hidden-states (key and values in the attention blocks) as computed by the model
(see `cache` output below). Can be used to speed up sequential decoding.
The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
"The bare XLM Model transformer outputting raw hidden-states without any specific head on top.",
XLM_START_DOCSTRING,
)
class XLMModel(XLMPreTrainedModel):
def __init__(self, config): # , dico, is_encoder, with_output):
super().__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
# encoder / decoder, output layer
self.is_encoder = config.is_encoder
self.is_decoder = not config.is_encoder
if self.is_decoder:
raise NotImplementedError("Currently XLM can only be used as an encoder")
# self.with_output = with_output
self.causal = config.causal
# dictionary / languages
self.n_langs = config.n_langs
self.use_lang_emb = config.use_lang_emb
self.n_words = config.n_words
self.eos_index = config.eos_index
self.pad_index = config.pad_index
# self.dico = dico
# self.id2lang = config.id2lang
# self.lang2id = config.lang2id
# assert len(self.dico) == self.n_words
# assert len(self.id2lang) == len(self.lang2id) == self.n_langs
# model parameters
self.dim = config.emb_dim # 512 by default
self.hidden_dim = self.dim * 4 # 2048 by default
self.n_heads = config.n_heads # 8 by default
self.n_layers = config.n_layers
self.dropout = config.dropout
self.attention_dropout = config.attention_dropout
assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads"
# embeddings
self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)
if config.sinusoidal_embeddings:
create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)
if config.n_langs > 1 and config.use_lang_emb:
self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)
self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
# transformer layers
self.attentions = nn.ModuleList()
self.layer_norm1 = nn.ModuleList()
self.ffns = nn.ModuleList()
self.layer_norm2 = nn.ModuleList()
# if self.is_decoder:
# self.layer_norm15 = nn.ModuleList()
# self.encoder_attn = nn.ModuleList()
for _ in range(self.n_layers):
self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))
self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
# if self.is_decoder:
# self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
# self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))
self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
if hasattr(config, "pruned_heads"):
pruned_heads = config.pruned_heads.copy().items()
config.pruned_heads = {}
for layer, heads in pruned_heads:
if self.attentions[int(layer)].n_heads == config.n_heads:
self.prune_heads({int(layer): list(map(int, heads))})
self.init_weights()
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.attentions[layer].prune_heads(heads)
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
lengths=None,
cache=None,
head_mask=None,
inputs_embeds=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLMTokenizer, XLMModel
import torch
tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = XLMModel.from_pretrained('xlm-mlm-en-2048')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
if input_ids is not None:
bs, slen = input_ids.size()
else:
bs, slen = inputs_embeds.size()[:-1]
if lengths is None:
if input_ids is not None:
lengths = (input_ids != self.pad_index).sum(dim=1).long()
else:
lengths = torch.LongTensor([slen] * bs)
# mask = input_ids != self.pad_index
# check inputs
assert lengths.size(0) == bs
assert lengths.max().item() <= slen
# input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
# assert (src_enc is None) == (src_len is None)
# if src_enc is not None:
# assert self.is_decoder
# assert src_enc.size(0) == bs
# generate masks
mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
# if self.is_decoder and src_enc is not None:
# src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
device = input_ids.device if input_ids is not None else inputs_embeds.device
# position_ids
if position_ids is None:
position_ids = torch.arange(slen, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand((bs, slen))
else:
assert position_ids.size() == (bs, slen) # (slen, bs)
# position_ids = position_ids.transpose(0, 1)
# langs
if langs is not None:
assert langs.size() == (bs, slen) # (slen, bs)
# langs = langs.transpose(0, 1)
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.n_layers)
# do not recompute cached elements
if cache is not None and input_ids is not None:
_slen = slen - cache["slen"]
input_ids = input_ids[:, -_slen:]
position_ids = position_ids[:, -_slen:]
if langs is not None:
langs = langs[:, -_slen:]
mask = mask[:, -_slen:]
attn_mask = attn_mask[:, -_slen:]
# embeddings
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
if langs is not None and self.use_lang_emb and self.n_langs > 1:
tensor = tensor + self.lang_embeddings(langs)
if token_type_ids is not None:
tensor = tensor + self.embeddings(token_type_ids)
tensor = self.layer_norm_emb(tensor)
tensor = F.dropout(tensor, p=self.dropout, training=self.training)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# transformer layers
hidden_states = ()
attentions = ()
for i in range(self.n_layers):
if self.output_hidden_states:
hidden_states = hidden_states + (tensor,)
# self attention
attn_outputs = self.attentions[i](tensor, attn_mask, cache=cache, head_mask=head_mask[i])
attn = attn_outputs[0]
if self.output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = F.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
tensor = self.layer_norm1[i](tensor)
# encoder attention (for decoder only)
# if self.is_decoder and src_enc is not None:
# attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
# attn = F.dropout(attn, p=self.dropout, training=self.training)
# tensor = tensor + attn
# tensor = self.layer_norm15[i](tensor)
# FFN
tensor = tensor + self.ffns[i](tensor)
tensor = self.layer_norm2[i](tensor)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# Add last hidden state
if self.output_hidden_states:
hidden_states = hidden_states + (tensor,)
# update cache length
if cache is not None:
cache["slen"] += tensor.size(1)
# move back sequence length to dimension 0
# tensor = tensor.transpose(0, 1)
outputs = (tensor,)
if self.output_hidden_states:
outputs = outputs + (hidden_states,)
if self.output_attentions:
outputs = outputs + (attentions,)
return outputs # outputs, (hidden_states), (attentions)
class XLMPredLayer(nn.Module):
"""
Prediction layer (cross_entropy or adaptive_softmax).
"""
def __init__(self, config):
super().__init__()
self.asm = config.asm
self.n_words = config.n_words
self.pad_index = config.pad_index
dim = config.emb_dim
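        # Full softmax projection over the vocabulary by default; adaptive softmax with clustered cutoffs when config.asm is set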
if config.asm is False:
self.proj = nn.Linear(dim, config.n_words, bias=True)
else:
self.proj = nn.AdaptiveLogSoftmaxWithLoss(
in_features=dim,
n_classes=config.n_words,
cutoffs=config.asm_cutoffs,
div_value=config.asm_div_value,
head_bias=True, # default is False
)
def forward(self, x, y=None):
""" Compute the loss, and optionally the scores.
"""
outputs = ()
if self.asm is False:
scores = self.proj(x)
outputs = (scores,) + outputs
if y is not None:
loss = F.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction="elementwise_mean")
outputs = (loss,) + outputs
else:
scores = self.proj.log_prob(x)
outputs = (scores,) + outputs
if y is not None:
_, loss = self.proj(x, y)
outputs = (loss,) + outputs
return outputs
@add_start_docstrings(
"""The XLM Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
XLM_START_DOCSTRING,
)
class XLMWithLMHeadModel(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLMModel(config)
self.pred_layer = XLMPredLayer(config)
self.init_weights()
def get_output_embeddings(self):
return self.pred_layer.proj
def prepare_inputs_for_generation(self, input_ids, **kwargs):
mask_token_id = self.config.mask_token_id
lang_id = self.config.lang_id
effective_batch_size = input_ids.shape[0]
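        # Append a <mask> token (plus, if configured, the matching language ids) so the model predicts the next token at the masked position during generation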
mask_token = torch.full((effective_batch_size, 1), mask_token_id, dtype=torch.long, device=input_ids.device)
input_ids = torch.cat([input_ids, mask_token], dim=1)
if lang_id is not None:
langs = torch.full_like(input_ids, lang_id)
else:
langs = None
return {"input_ids": input_ids, "langs": langs}
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
lengths=None,
cache=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLMTokenizer, XLMWithLMHeadModel
import torch
tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
output = transformer_outputs[0]
outputs = self.pred_layer(output, labels)
outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here
return outputs
@add_start_docstrings(
"""XLM Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
XLM_START_DOCSTRING,
)
class XLMForSequenceClassification(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLMModel(config)
self.sequence_summary = SequenceSummary(config)
self.init_weights()
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
lengths=None,
cache=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLMTokenizer, XLMForSequenceClassification
import torch
tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = XLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
output = transformer_outputs[0]
logits = self.sequence_summary(output)
outputs = (logits,) + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs
@add_start_docstrings(
"""XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLM_START_DOCSTRING,
)
class XLMForQuestionAnsweringSimple(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLMModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
lengths=None,
cache=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLMTokenizer, XLMForQuestionAnsweringSimple
import torch
tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = XLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
start_positions = torch.tensor([1])
end_positions = torch.tensor([3])
outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
loss = outputs[0]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = transformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (
start_logits,
end_logits,
)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here
return outputs
@add_start_docstrings(
"""XLM Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLM_START_DOCSTRING,
)
class XLMForQuestionAnswering(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLMModel(config)
self.qa_outputs = SQuADHead(config)
self.init_weights()
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
lengths=None,
cache=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
is_impossible=None,
cls_index=None,
p_mask=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels whether a question has an answer or no answer (SQuAD 2.0)
cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
1.0 means token should be masked. 0.0 mean token is not masked.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the ``is_impossible`` label of the answers.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLMTokenizer, XLMForQuestionAnswering
import torch
tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = XLMForQuestionAnswering.from_pretrained('xlm-mlm-en-2048')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
start_positions = torch.tensor([1])
end_positions = torch.tensor([3])
outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
loss = outputs[0]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
output = transformer_outputs[0]
outputs = self.qa_outputs(
output,
start_positions=start_positions,
end_positions=end_positions,
cls_index=cls_index,
is_impossible=is_impossible,
p_mask=p_mask,
)
outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here
return outputs
@add_start_docstrings(
"""XLM Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
XLM_START_DOCSTRING,
)
class XLMForTokenClassification(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLMModel(config)
self.dropout = nn.Dropout(config.dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
langs=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
labels=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
Classification loss.
scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLMTokenizer, XLMForTokenClassification
import torch
tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-100-1280')
model = XLMForTokenClassification.from_pretrained('xlm-mlm-100-1280')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, scores = outputs[:2]
"""
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.AdaptiveLogSoftmaxWithLoss",
"torch.nn.ModuleList",
"torch.ones",
"torch.LongTensor",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.nn.init.constant_",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.functional.dropout",
"torch.full_like",
"torch.full",
"torch.matmul",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.Embedding"
] | 1.4.0 | 12190143/transformers | ab90353f1abfd15f8d21f99395658d060679a08c |
1.3 | import os
import torch
from osgeo import gdal
import numpy as np
from warnings import warn
from .model_io import get_model
from .transform import process_aug_dict
from .datagen import InferenceTiler
from ..raster.image import stitch_images, create_multiband_geotiff
from ..utils.core import get_data_paths
class Inferer(object):
"""Object for training `solaris` models using PyTorch or Keras."""
def __init__(self, config, custom_model_dict=None):
self.config = config
self.batch_size = self.config['batch_size']
self.framework = self.config['nn_framework']
self.model_name = self.config['model_name']
# check if the model was trained as part of the same pipeline; if so,
# use the output from that. If not, use the pre-trained model directly.
if self.config['train']:
warn('Because the configuration specifies both training and '
'inference, solaris is switching the model weights path '
'to the training output path.')
self.model_path = self.config['training']['model_dest_path']
if custom_model_dict is not None:
custom_model_dict['weight_path'] = self.config[
'training']['model_dest_path']
else:
self.model_path = self.config.get('model_path', None)
self.model = get_model(self.model_name, self.framework,
self.model_path, pretrained=True,
custom_model_dict=custom_model_dict)
self.window_step_x = self.config['inference'].get('window_step_size_x',
None)
self.window_step_y = self.config['inference'].get('window_step_size_y',
None)
if self.window_step_x is None:
self.window_step_x = self.config['data_specs']['width']
if self.window_step_y is None:
self.window_step_y = self.config['data_specs']['height']
self.stitching_method = self.config['inference'].get(
'stitching_method', 'average')
self.output_dir = self.config['inference']['output_dir']
if not os.path.isdir(self.output_dir):
os.makedirs(self.output_dir)
def __call__(self, infer_df=None):
"""Run inference.
Arguments
---------
infer_df : :class:`pandas.DataFrame` or `str`
A :class:`pandas.DataFrame` with a column, ``'image'``, specifying
paths to images for inference. Alternatively, `infer_df` can be a
path to a CSV file containing the same information. Defaults to
``None``, in which case the file path specified in the Inferer's
configuration dict is used.
"""
if infer_df is None:
infer_df = get_infer_df(self.config)
inf_tiler = InferenceTiler(
self.framework,
width=self.config['data_specs']['width'],
height=self.config['data_specs']['height'],
x_step=self.window_step_x,
y_step=self.window_step_y,
augmentations=process_aug_dict(
self.config['inference_augmentation']))
for idx, im_path in enumerate(infer_df['image']):
temp_im = gdal.Open(im_path)
proj = temp_im.GetProjection()
gt = temp_im.GetGeoTransform()
inf_input, idx_refs, (
src_im_height, src_im_width) = inf_tiler(im_path)
if self.framework == 'keras':
subarr_preds = self.model.predict(inf_input,
batch_size=self.batch_size)
elif self.framework in ['torch', 'pytorch']:
with torch.no_grad():
self.model.eval()
if torch.cuda.is_available():
device = torch.device('cuda')
self.model = self.model.cuda()
else:
device = torch.device('cpu')
inf_input = torch.from_numpy(inf_input).float().to(device)
# add additional input data, if applicable
if self.config['data_specs'].get('additional_inputs',
None) is not None:
inf_input = [inf_input]
for i in self.config['data_specs']['additional_inputs']:
inf_input.append(
infer_df[i].iloc[idx].to(device))
subarr_preds = self.model(inf_input)
subarr_preds = subarr_preds.cpu().data.numpy()
stitched_result = stitch_images(subarr_preds,
idx_refs=idx_refs,
out_width=src_im_width,
out_height=src_im_height,
method=self.stitching_method)
stitched_result = np.swapaxes(stitched_result, 1, 0)
stitched_result = np.swapaxes(stitched_result, 2, 0)
create_multiband_geotiff(stitched_result,
os.path.join(self.output_dir,
os.path.split(im_path)[1]),
proj=proj, geo=gt, nodata=np.nan,
out_format=gdal.GDT_Float32)
def get_infer_df(config):
"""Get the inference df based on the contents of ``config`` .
This function uses the logic described in the documentation for the config
file to determine where to find images to be used for inference.
See the docs and the comments in solaris/data/config_skeleton.yml for
details.
Arguments
---------
config : dict
The loaded configuration dict for model training and/or inference.
Returns
-------
infer_df : :class:`dict`
:class:`dict` containing at least one column: ``'image'`` . The values
in this column correspond to the path to filenames to perform inference
on.
"""
infer_df = get_data_paths(config['inference_data_csv'], infer=True)
return infer_df
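# Usage sketch (hedged illustration, not part of the original module): assuming `config` is the
# loaded configuration dict described above, with config['inference_data_csv'] pointing at a CSV
# listing the images to predict on, inference could be run as:
#
#     inferer = Inferer(config)
#     inferer()  # writes stitched GeoTIFF predictions to config['inference']['output_dir']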
| [
"torch.device",
"torch.no_grad",
"torch.from_numpy",
"torch.cuda.is_available"
] | 1.3.1 | sandhi-artha/solaris | 230a58f94f300062ee880d43920d218edf3321c4 |
1.1 | import random
import time
from collections import namedtuple
import pytest
import torch
import numpy as np
from easydict import EasyDict
from functools import partial
import gym
from ding.envs.env.base_env import BaseEnvTimestep
from ding.envs.env_manager.base_env_manager import EnvState
from ding.envs.env_manager import BaseEnvManager, SyncSubprocessEnvManager, AsyncSubprocessEnvManager
from ding.torch_utils import to_tensor, to_ndarray, to_list
from ding.utils import deep_merge_dicts
class FakeEnv(object):
def __init__(self, cfg):
self._scale = cfg.scale
self._target_time = random.randint(3, 6) * self._scale
self._current_time = 0
self._name = cfg['name']
self._id = time.time()
self._stat = None
self._seed = 0
self._data_count = 0
self.timeout_flag = False
self._launched = False
self._state = EnvState.INIT
self._dead_once = False
self.observation_space = gym.spaces.Box(
low=np.array([-1.0, -1.0, -8.0]), high=np.array([1.0, 1.0, 8.0]), shape=(3, ), dtype=np.float32
)
self.action_space = gym.spaces.Box(low=-2.0, high=2.0, shape=(1, ), dtype=np.float32)
self.reward_space = gym.spaces.Box(
low=-1 * (3.14 * 3.14 + 0.1 * 8 * 8 + 0.001 * 2 * 2), high=0.0, shape=(1, ), dtype=np.float32
)
def reset(self, stat=None):
if isinstance(stat, str) and stat == 'error':
self.dead()
if isinstance(stat, str) and stat == 'error_once':
# With the 'error_once' stat, die on every other reset.
if self._dead_once:
self._dead_once = False
self.dead()
else:
self._dead_once = True
if isinstance(stat, str) and stat == "wait":
if self.timeout_flag:  # once step() has been called, reset can hang to simulate a timeout
time.sleep(5)
if isinstance(stat, str) and stat == "block":
self.block()
self._launched = True
self._current_time = 0
self._stat = stat
self._state = EnvState.RUN
return to_ndarray(torch.randn(3))
def step(self, action):
assert self._launched
assert not self._state == EnvState.ERROR
self.timeout_flag = True # after one step, enable timeout flag
if isinstance(action, str) and action == 'error':
self.dead()
if isinstance(action, str) and action == 'catched_error':
return BaseEnvTimestep(None, None, True, {'abnormal': True})
if isinstance(action, str) and action == "wait":
if self.timeout_flag:  # once step() has been called, the env can hang to simulate a timeout
time.sleep(3)
if isinstance(action, str) and action == 'block':
self.block()
obs = to_ndarray(torch.randn(3))
reward = to_ndarray(torch.randint(0, 2, size=[1]).numpy())
done = self._current_time >= self._target_time
if done:
self._state = EnvState.DONE
simulation_time = random.uniform(0.5, 1) * self._scale
info = {'name': self._name, 'time': simulation_time, 'tgt': self._target_time, 'cur': self._current_time}
time.sleep(simulation_time)
self._current_time += simulation_time
self._data_count += 1
return BaseEnvTimestep(obs, reward, done, info)
def dead(self):
self._state = EnvState.ERROR
raise RuntimeError("env error, current time {}".format(self._current_time))
def block(self):
self._state = EnvState.ERROR
time.sleep(1000)
def close(self):
self._launched = False
self._state = EnvState.INIT
def seed(self, seed):
self._seed = seed
@property
def name(self):
return self._name
@property
def time_id(self):
return self._id
def user_defined(self):
pass
def __repr__(self):
return self._name
class FakeAsyncEnv(FakeEnv):
def reset(self, stat=None):
super().reset(stat)
time.sleep(random.randint(1, 3) * self._scale)
return to_ndarray(torch.randn(3))
class FakeGymEnv(FakeEnv):
def __init__(self, cfg):
super().__init__(cfg)
self.metadata = "fake metadata"
self.action_space = gym.spaces.Box(low=-2.0, high=2.0, shape=(4, ), dtype=np.float32)
def random_action(self) -> np.ndarray:
random_action = self.action_space.sample()
if isinstance(random_action, np.ndarray):
pass
elif isinstance(random_action, int):
random_action = to_ndarray([random_action], dtype=np.int64)
elif isinstance(random_action, dict):
random_action = to_ndarray(random_action)
else:
raise TypeError(
'`random_action` should be either int/np.ndarray or dict of int/np.ndarray, but get {}: {}'.format(
type(random_action), random_action
)
)
return random_action
class FakeModel(object):
def forward(self, obs):
if random.random() > 0.5:
return {k: [] for k in obs}
else:
env_num = len(obs)
exec_env = random.randint(1, env_num + 1)
keys = list(obs.keys())[:exec_env]
return {k: [] for k in keys}
@pytest.fixture(scope='class')
def setup_model_type():
return FakeModel
def get_base_manager_cfg(env_num=4):
manager_cfg = {
'env_cfg': [{
'name': 'name{}'.format(i),
'scale': 1.0,
} for i in range(env_num)],
'episode_num': 2,
'reset_timeout': 10,
'step_timeout': 8,
'max_retry': 5,
}
return EasyDict(manager_cfg)
def get_subprocess_manager_cfg(env_num=4):
manager_cfg = {
'env_cfg': [{
'name': 'name{}'.format(i),
'scale': 1.0,
} for i in range(env_num)],
'episode_num': 2,
#'step_timeout': 8,
#'reset_timeout': 10,
'connect_timeout': 8,
'step_timeout': 5,
'max_retry': 2,
}
return EasyDict(manager_cfg)
def get_gym_vector_manager_cfg(env_num=4):
manager_cfg = {
'env_cfg': [{
'name': 'name{}'.format(i),
} for i in range(env_num)],
'episode_num': 2,
'connect_timeout': 8,
'step_timeout': 5,
'max_retry': 2,
'share_memory': True
}
return EasyDict(manager_cfg)
@pytest.fixture(scope='function')
def setup_base_manager_cfg():
manager_cfg = get_base_manager_cfg(4)
env_cfg = manager_cfg.pop('env_cfg')
manager_cfg['env_fn'] = [partial(FakeEnv, cfg=c) for c in env_cfg]
return deep_merge_dicts(BaseEnvManager.default_config(), EasyDict(manager_cfg))
@pytest.fixture(scope='function')
def setup_fast_base_manager_cfg():
manager_cfg = get_base_manager_cfg(4)
env_cfg = manager_cfg.pop('env_cfg')
for e in env_cfg:
e['scale'] = 0.1
manager_cfg['env_fn'] = [partial(FakeEnv, cfg=c) for c in env_cfg]
return deep_merge_dicts(BaseEnvManager.default_config(), EasyDict(manager_cfg))
@pytest.fixture(scope='function')
def setup_sync_manager_cfg():
manager_cfg = get_subprocess_manager_cfg(4)
env_cfg = manager_cfg.pop('env_cfg')
# TODO(nyz) test fails when shared_memory = True
manager_cfg['shared_memory'] = False
manager_cfg['env_fn'] = [partial(FakeEnv, cfg=c) for c in env_cfg]
return deep_merge_dicts(SyncSubprocessEnvManager.default_config(), EasyDict(manager_cfg))
@pytest.fixture(scope='function')
def setup_async_manager_cfg():
manager_cfg = get_subprocess_manager_cfg(4)
env_cfg = manager_cfg.pop('env_cfg')
manager_cfg['env_fn'] = [partial(FakeAsyncEnv, cfg=c) for c in env_cfg]
manager_cfg['shared_memory'] = False
return deep_merge_dicts(AsyncSubprocessEnvManager.default_config(), EasyDict(manager_cfg))
@pytest.fixture(scope='function')
def setup_gym_vector_manager_cfg():
manager_cfg = get_subprocess_manager_cfg(4)
env_cfg = manager_cfg.pop('env_cfg')
manager_cfg['env_fn'] = [partial(FakeGymEnv, cfg=c) for c in env_cfg]
manager_cfg['shared_memory'] = False
return EasyDict(manager_cfg)
| [
"torch.randint",
"torch.randn"
] | 1.1.0 | song2181/DI-engine | 268d77db3cb54401b2cfc83e2bc3ec87c31e7b83 |
1.6 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from contextlib import contextmanager
from typing import Any, Dict, Generator, Iterator, Mapping, Optional, Sequence, Tuple
import torch
from torch.optim import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.types import STEP_OUTPUT
def check_finite_loss(loss: Optional[torch.Tensor]) -> None:
"""Checks for finite loss value.
Args:
loss: the loss value to check to be finite
"""
if loss is not None and not torch.isfinite(loss).all():
raise ValueError(f"The loss returned in `training_step` is {loss}.")
def _check_training_step_output(model: "pl.LightningModule", training_step_output: STEP_OUTPUT) -> None:
"""Sanity checks that training produced a valid output and optimizer step has already been called in manual
optimization.
Args:
model: a reference to the trainer
training_step_output: the output of the training step (before wrapping in an AttributeDict)
"""
if isinstance(training_step_output, torch.Tensor) and not model.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
elif model.automatic_optimization:
if not any(
(
isinstance(training_step_output, torch.Tensor),
(isinstance(training_step_output, Mapping) and "loss" in training_step_output),
training_step_output is None,
)
):
raise MisconfigurationException(
"In automatic optimization, `training_step` must either return a Tensor, "
"a dict with key 'loss' or None (where the step will be skipped)."
)
def _process_training_step_output(
trainer: "pl.Trainer", training_step_output: STEP_OUTPUT
) -> Tuple[Optional[ResultCollection], Optional[Any]]:
"""Adds the :param:`training_step_output` to the trainer's results.
Args:
trainer: a reference to the trainer
training_step_output: the output of the training step (before wrapping into an AttributeDict)
Returns:
the updated results (None if the training_step's output was None) and the hiddens extracted from the results
"""
if training_step_output is None:
return None, None
results = trainer._results
loss = None
hiddens = None
# handle dict return
if isinstance(training_step_output, dict):
# this should not modify the `training_step_output`, as the user could be using it after `training_step_end`
loss = training_step_output.get("loss")
hiddens = training_step_output.get("hiddens")
# detach hiddens to avoid `RuntimeError: Trying to backward through the graph a second time`
hiddens = apply_to_collection(hiddens, torch.Tensor, lambda t: t.detach())
# use the setter instead of `dict.update` because it calls `detach` on the tensor items
results.extra = {k: v for k, v in training_step_output.items() if k not in ("loss", "hiddens")}
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
if trainer.terminate_on_nan:
check_finite_loss(loss)
# the loss shouldn't be moved to cpu.
if trainer.move_metrics_to_cpu:
results.cpu()
# map to results under the hood
results.minimize = loss
return results, hiddens
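# Illustrative note (hedged, hypothetical keys): if `training_step_output` were the dict
# {"loss": loss, "hiddens": h, "foo": some_tensor}, the function above would set
# `results.minimize = loss`, return a detached `h` as the hiddens, and store the remaining
# {"foo": some_tensor} entry under `results.extra`.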
def _build_training_step_kwargs(
lightning_module: "pl.LightningModule",
optimizers: Sequence[Optimizer],
batch: Any,
batch_idx: int,
opt_idx: Optional[int],
hiddens: Optional[Any],
) -> Dict[str, Any]:
"""Builds the keyword arguments for training_step.
Args:
lightning_module: the LightningModule with a `training_step` hook implementation
optimizers: the list of optimizers from the Trainer
batch: the batch to train on
batch_idx: the index of the current batch
opt_idx: the index of the current optimizer
hiddens: the hidden state of the previous RNN iteration
Returns:
the keyword arguments for the training step
"""
# enable not needing to add opt_idx to training_step
step_kwargs = OrderedDict([("batch", batch)])
training_step_fx = getattr(lightning_module, "training_step")
if is_param_in_hook_signature(training_step_fx, "batch_idx", min_args=2):
step_kwargs["batch_idx"] = batch_idx
if len(optimizers) > 1:
has_opt_idx_in_train_step = is_param_in_hook_signature(training_step_fx, "optimizer_idx")
if has_opt_idx_in_train_step:
if not lightning_module.automatic_optimization:
raise ValueError(
"Your `LightningModule.training_step` signature contains an `optimizer_idx` argument but"
" in manual optimization optimizers must be handled by the user. Remove the optimizer_idx"
" argument or set `self.automatic_optimization = True`."
)
step_kwargs["optimizer_idx"] = opt_idx
elif not has_opt_idx_in_train_step and lightning_module.automatic_optimization:
raise ValueError(
f"Your LightningModule defines {len(optimizers)} optimizers but"
" `training_step` is missing the `optimizer_idx` argument."
)
# pass hiddens if using tbptt
if lightning_module.truncated_bptt_steps > 0:
step_kwargs["hiddens"] = hiddens
return step_kwargs
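# Illustrative note (hedged): for a LightningModule whose hook is defined as
# `def training_step(self, batch, batch_idx)`, using automatic optimization with a single
# optimizer and `truncated_bptt_steps == 0`, the kwargs built above reduce to
# OrderedDict([("batch", batch), ("batch_idx", batch_idx)]).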
def _prepare_dataloader_iter(data_fetcher: AbstractDataFetcher, batch_idx: int) -> Iterator:
"""Attach the dataloader."""
if not isinstance(data_fetcher, DataLoaderIterDataFetcher):
# restore iteration
dataloader_iter = enumerate(data_fetcher, batch_idx)
else:
dataloader_iter = iter(data_fetcher)
return dataloader_iter
@contextmanager
def _block_parallel_sync_behavior(trainer: "pl.Trainer", block: bool = True) -> Generator[None, None, None]:
"""Blocks synchronization in :class:`~pytorch_lightning.plugins.training_type.parallel.ParallelPlugin`. This is
useful, for example, when accumulating gradients to reduce communication when it is not needed.
Args:
trainer: the trainer instance with a reference to a training type plugin
block: whether the context manager is enabled or not
Returns:
context manager with sync behaviour off
"""
if isinstance(trainer.training_type_plugin, ParallelPlugin) and block:
with trainer.training_type_plugin.block_backward_sync():
yield None
else:
yield None
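# Usage sketch (hedged): a training loop that accumulates gradients might skip the all-reduce on
# intermediate micro-batches, e.g.
#
#     with _block_parallel_sync_behavior(trainer, block=not is_last_micro_batch):
#         ...  # run the backward pass; gradients are only synchronized when block=False
#
# `is_last_micro_batch` is a hypothetical flag standing in for the loop's own bookkeeping.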
| [
"torch.isfinite"
] | 1.6 | aphedges/pytorch-lightning | 160e7e128909abc8489261287a562777cf1ada02 |
1.2 | """
A stacked bidirectional LSTM with skip connections between layers.
"""
from typing import Optional, Tuple, List
import warnings
import torch
from torch.nn.utils.rnn import PackedSequence, pad_packed_sequence
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import h5py
import numpy
from allennlp.modules.lstm_cell_with_projection import LstmCellWithProjection
from allennlp.common.checks import ConfigurationError
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.common.file_utils import cached_path
class ElmoLstm(_EncoderBase):
"""
A stacked, bidirectional LSTM which uses
[`LstmCellWithProjection`'s](./lstm_cell_with_projection.md)
with highway layers between the inputs to layers.
The inputs to the forward and backward directions are independent - forward and backward
states are not concatenated between layers.
Additionally, this LSTM maintains its `own` state, which is updated every time
`forward` is called. It is dynamically resized for different batch sizes and is
designed for use with non-continuous inputs (i.e. inputs which aren't formatted as a stream,
such as text used for a language modeling task, which is how stateful RNNs are typically used).
This is non-standard, but can be thought of as having an "end of sentence" state, which is
carried across different sentences.
# Parameters
input_size : `int`, required
The dimension of the inputs to the LSTM.
hidden_size : `int`, required
The dimension of the outputs of the LSTM.
cell_size : `int`, required.
The dimension of the memory cell of the `LstmCellWithProjection`.
num_layers : `int`, required
The number of bidirectional LSTMs to use.
requires_grad : `bool`, optional
If True, compute gradient of ELMo parameters for fine tuning.
recurrent_dropout_probability : `float`, optional (default = 0.0)
The dropout probability to be used in a dropout scheme as stated in
[A Theoretically Grounded Application of Dropout in Recurrent Neural Networks]
(https://arxiv.org/abs/1512.05287).
state_projection_clip_value : `float`, optional, (default = None)
The magnitude with which to clip the hidden_state after projecting it.
memory_cell_clip_value : `float`, optional, (default = None)
The magnitude with which to clip the memory cell.
"""
def __init__(
self,
input_size: int,
hidden_size: int,
cell_size: int,
num_layers: int,
requires_grad: bool = False,
recurrent_dropout_probability: float = 0.0,
memory_cell_clip_value: Optional[float] = None,
state_projection_clip_value: Optional[float] = None,
) -> None:
super().__init__(stateful=True)
# Required to be wrapped with a `PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.cell_size = cell_size
self.requires_grad = requires_grad
forward_layers = []
backward_layers = []
lstm_input_size = input_size
go_forward = True
for layer_index in range(num_layers):
forward_layer = LstmCellWithProjection(
lstm_input_size,
hidden_size,
cell_size,
go_forward,
recurrent_dropout_probability,
memory_cell_clip_value,
state_projection_clip_value,
)
backward_layer = LstmCellWithProjection(
lstm_input_size,
hidden_size,
cell_size,
not go_forward,
recurrent_dropout_probability,
memory_cell_clip_value,
state_projection_clip_value,
)
lstm_input_size = hidden_size
self.add_module("forward_layer_{}".format(layer_index), forward_layer)
self.add_module("backward_layer_{}".format(layer_index), backward_layer)
forward_layers.append(forward_layer)
backward_layers.append(backward_layer)
self.forward_layers = forward_layers
self.backward_layers = backward_layers
def forward(self, inputs: torch.Tensor, mask: torch.LongTensor) -> torch.Tensor:
"""
# Parameters
inputs : `torch.Tensor`, required.
A Tensor of shape `(batch_size, sequence_length, hidden_size)`.
mask : `torch.LongTensor`, required.
A binary mask of shape `(batch_size, sequence_length)` representing the
non-padded elements in each sequence in the batch.
# Returns
A `torch.Tensor` of shape (num_layers, batch_size, sequence_length, hidden_size),
where the num_layers dimension represents the LSTM output from that layer.
"""
batch_size, total_sequence_length = mask.size()
stacked_sequence_output, final_states, restoration_indices = self.sort_and_run_forward(
self._lstm_forward, inputs, mask
)
num_layers, num_valid, returned_timesteps, encoder_dim = stacked_sequence_output.size()
# Add back invalid rows which were removed in the call to sort_and_run_forward.
if num_valid < batch_size:
zeros = stacked_sequence_output.new_zeros(
num_layers, batch_size - num_valid, returned_timesteps, encoder_dim
)
stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 1)
# The states also need to have invalid rows added back.
new_states = []
for state in final_states:
state_dim = state.size(-1)
zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)
new_states.append(torch.cat([state, zeros], 1))
final_states = new_states
# Sequences passed to a Seq2StackEncoder may be padded to longer than the
# max length of the longest sequence in the batch. However, packing and unpacking
# the sequences means that the returned tensor won't include these timesteps, because
# the RNN did not need to process them. We add them back on in the form of zeros here.
sequence_length_difference = total_sequence_length - returned_timesteps
if sequence_length_difference > 0:
zeros = stacked_sequence_output.new_zeros(
num_layers,
batch_size,
sequence_length_difference,
stacked_sequence_output[0].size(-1),
)
stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 2)
self._update_states(final_states, restoration_indices)
# Restore the original indices and return the sequence.
# Has shape (num_layers, batch_size, sequence_length, hidden_size)
return stacked_sequence_output.index_select(1, restoration_indices)
def _lstm_forward(
self,
inputs: PackedSequence,
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
# Parameters
inputs : `PackedSequence`, required.
A batch first `PackedSequence` to run the stacked LSTM over.
initial_state : `Tuple[torch.Tensor, torch.Tensor]`, optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and
(num_layers, batch_size, 2 * cell_size) respectively.
# Returns
output_sequence : `torch.FloatTensor`
The encoded sequence of shape (num_layers, batch_size, sequence_length, hidden_size)
final_states : `Tuple[torch.FloatTensor, torch.FloatTensor]`
The per-layer final (state, memory) states of the LSTM, with shape
(num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size)
respectively. The last dimension is duplicated because it contains the state/memory
for both the forward and backward layers.
"""
if initial_state is None:
hidden_states: List[Optional[Tuple[torch.Tensor, torch.Tensor]]] = [None] * len(
self.forward_layers
)
elif initial_state[0].size()[0] != len(self.forward_layers):
raise ConfigurationError(
"Initial states were passed to forward() but the number of "
"initial states does not match the number of layers."
)
else:
hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))
inputs, batch_lengths = pad_packed_sequence(inputs, batch_first=True)
forward_output_sequence = inputs
backward_output_sequence = inputs
final_states = []
sequence_outputs = []
for layer_index, state in enumerate(hidden_states):
forward_layer = getattr(self, "forward_layer_{}".format(layer_index))
backward_layer = getattr(self, "backward_layer_{}".format(layer_index))
forward_cache = forward_output_sequence
backward_cache = backward_output_sequence
if state is not None:
forward_hidden_state, backward_hidden_state = state[0].split(self.hidden_size, 2)
forward_memory_state, backward_memory_state = state[1].split(self.cell_size, 2)
forward_state = (forward_hidden_state, forward_memory_state)
backward_state = (backward_hidden_state, backward_memory_state)
else:
forward_state = None
backward_state = None
forward_output_sequence, forward_state = forward_layer(
forward_output_sequence, batch_lengths, forward_state
)
backward_output_sequence, backward_state = backward_layer(
backward_output_sequence, batch_lengths, backward_state
)
# Skip connections, just adding the input to the output.
if layer_index != 0:
forward_output_sequence += forward_cache
backward_output_sequence += backward_cache
sequence_outputs.append(
torch.cat([forward_output_sequence, backward_output_sequence], -1)
)
# Append the state tuples in a list, so that we can return
# the final states for all the layers.
final_states.append(
(
torch.cat([forward_state[0], backward_state[0]], -1),
torch.cat([forward_state[1], backward_state[1]], -1),
)
)
stacked_sequence_outputs: torch.FloatTensor = torch.stack(sequence_outputs)
# Stack the hidden state and memory for each layer into 2 tensors of shape
# (num_layers, batch_size, hidden_size) and (num_layers, batch_size, cell_size)
# respectively.
final_hidden_states, final_memory_states = zip(*final_states)
final_state_tuple: Tuple[torch.FloatTensor, torch.FloatTensor] = (
torch.cat(final_hidden_states, 0),
torch.cat(final_memory_states, 0),
)
return stacked_sequence_outputs, final_state_tuple
def load_weights(self, weight_file: str) -> None:
"""
Load the pre-trained weights from the file.
"""
requires_grad = self.requires_grad
with h5py.File(cached_path(weight_file), "r") as fin:
for i_layer, lstms in enumerate(zip(self.forward_layers, self.backward_layers)):
for j_direction, lstm in enumerate(lstms):
# lstm is an instance of LSTMCellWithProjection
cell_size = lstm.cell_size
dataset = fin["RNN_%s" % j_direction]["RNN"]["MultiRNNCell"][
"Cell%s" % i_layer
]["LSTMCell"]
# tensorflow packs together both W and U matrices into one matrix,
# but pytorch maintains individual matrices. In addition, tensorflow
# packs the gates as input, memory, forget, output but pytorch
# uses input, forget, memory, output. So we need to modify the weights.
tf_weights = numpy.transpose(dataset["W_0"][...])
torch_weights = tf_weights.copy()
# split the W from U matrices
input_size = lstm.input_size
input_weights = torch_weights[:, :input_size]
recurrent_weights = torch_weights[:, input_size:]
tf_input_weights = tf_weights[:, :input_size]
tf_recurrent_weights = tf_weights[:, input_size:]
# handle the different gate order convention
for torch_w, tf_w in [
[input_weights, tf_input_weights],
[recurrent_weights, tf_recurrent_weights],
]:
torch_w[(1 * cell_size) : (2 * cell_size), :] = tf_w[
(2 * cell_size) : (3 * cell_size), :
]
torch_w[(2 * cell_size) : (3 * cell_size), :] = tf_w[
(1 * cell_size) : (2 * cell_size), :
]
lstm.input_linearity.weight.data.copy_(torch.FloatTensor(input_weights))
lstm.state_linearity.weight.data.copy_(torch.FloatTensor(recurrent_weights))
lstm.input_linearity.weight.requires_grad = requires_grad
lstm.state_linearity.weight.requires_grad = requires_grad
# the bias weights
tf_bias = dataset["B"][...]
# tensorflow adds 1.0 to forget gate bias instead of modifying the
# parameters...
tf_bias[(2 * cell_size) : (3 * cell_size)] += 1
torch_bias = tf_bias.copy()
torch_bias[(1 * cell_size) : (2 * cell_size)] = tf_bias[
(2 * cell_size) : (3 * cell_size)
]
torch_bias[(2 * cell_size) : (3 * cell_size)] = tf_bias[
(1 * cell_size) : (2 * cell_size)
]
lstm.state_linearity.bias.data.copy_(torch.FloatTensor(torch_bias))
lstm.state_linearity.bias.requires_grad = requires_grad
# the projection weights
proj_weights = numpy.transpose(dataset["W_P_0"][...])
lstm.state_projection.weight.data.copy_(torch.FloatTensor(proj_weights))
lstm.state_projection.weight.requires_grad = requires_grad
| [
"torch.cat",
"torch.stack",
"torch.FloatTensor",
"torch.nn.utils.rnn.pad_packed_sequence"
] | 1.2.0 | justindujardin/allennlp | c4559f3751775aa8bc018db417edc119d29d8051 |
1.2 | from typing import Dict, Tuple, List, NamedTuple, Any
from overrides import overrides
import torch
from torch.nn.modules.linear import Linear
from nltk import Tree
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder, FeedForward
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import masked_softmax, get_lengths_from_binary_sequence_mask
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.metrics import EvalbBracketingScorer, DEFAULT_EVALB_DIR
from allennlp.common.checks import ConfigurationError
class SpanInformation(NamedTuple):
"""
A helper namedtuple for handling decoding information.
# Parameters
start : `int`
The start index of the span.
end : `int`
The exclusive end index of the span.
no_label_prob : `float`
The probability of this span being assigned the `NO-LABEL` label.
label_prob : `float`
The probability of the most likely label.
"""
start: int
end: int
label_prob: float
no_label_prob: float
label_index: int
@Model.register("constituency_parser")
class SpanConstituencyParser(Model):
"""
This `SpanConstituencyParser` simply encodes a sequence of text
with a stacked `Seq2SeqEncoder`, extracts span representations using a
`SpanExtractor`, and then predicts a label for each span in the sequence.
These labels are non-terminal nodes in a constituency parse tree, which we then
greedily reconstruct.
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the `tokens` `TextField` we get as input to the model.
span_extractor : `SpanExtractor`, required.
The method used to extract the spans from the encoded sequence.
encoder : `Seq2SeqEncoder`, required.
The encoder that we will use in between embedding tokens and
generating span representations.
feedforward : `FeedForward`, required.
The FeedForward layer that we will use in between the encoder and the linear
projection to a distribution over span labels.
pos_tag_embedding : `Embedding`, optional.
Used to embed the `pos_tags` `SequenceLabelField` we get as input to the model.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
evalb_directory_path : `str`, optional (default=`DEFAULT_EVALB_DIR`)
The path to the directory containing the EVALB executable used to score
bracketed parses. By default, will use the EVALB included with allennlp,
which is located at allennlp/tools/EVALB . If `None`, EVALB scoring
is not used.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
span_extractor: SpanExtractor,
encoder: Seq2SeqEncoder,
feedforward: FeedForward = None,
pos_tag_embedding: Embedding = None,
initializer: InitializerApplicator = InitializerApplicator(),
evalb_directory_path: str = DEFAULT_EVALB_DIR,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.text_field_embedder = text_field_embedder
self.span_extractor = span_extractor
self.num_classes = self.vocab.get_vocab_size("labels")
self.encoder = encoder
self.feedforward_layer = TimeDistributed(feedforward) if feedforward else None
self.pos_tag_embedding = pos_tag_embedding or None
if feedforward is not None:
output_dim = feedforward.get_output_dim()
else:
output_dim = span_extractor.get_output_dim()
self.tag_projection_layer = TimeDistributed(Linear(output_dim, self.num_classes))
representation_dim = text_field_embedder.get_output_dim()
if pos_tag_embedding is not None:
representation_dim += pos_tag_embedding.get_output_dim()
check_dimensions_match(
representation_dim,
encoder.get_input_dim(),
"representation dim (tokens + optional POS tags)",
"encoder input dim",
)
check_dimensions_match(
encoder.get_output_dim(),
span_extractor.get_input_dim(),
"encoder input dim",
"span extractor input dim",
)
if feedforward is not None:
check_dimensions_match(
span_extractor.get_output_dim(),
feedforward.get_input_dim(),
"span extractor output dim",
"feedforward input dim",
)
self.tag_accuracy = CategoricalAccuracy()
if evalb_directory_path is not None:
self._evalb_score = EvalbBracketingScorer(evalb_directory_path)
else:
self._evalb_score = None
initializer(self)
@overrides
def forward(
self, # type: ignore
tokens: TextFieldTensors,
spans: torch.LongTensor,
metadata: List[Dict[str, Any]],
pos_tags: TextFieldTensors = None,
span_labels: torch.LongTensor = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : TextFieldTensors, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer`
tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{"tokens":
Tensor(batch_size, num_tokens)}`. This dictionary will have the same keys as were used
for the `TokenIndexers` when you created the `TextField` representing your
sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`,
which knows how to combine different word representations into a single vector per
token in your input.
spans : `torch.LongTensor`, required.
A tensor of shape `(batch_size, num_spans, 2)` representing the
inclusive start and end indices of all possible spans in the sentence.
metadata : List[Dict[str, Any]], required.
A dictionary of metadata for each batch element which has keys:
tokens : `List[str]`, required.
The original string tokens in the sentence.
gold_tree : `nltk.Tree`, optional (default = None)
Gold NLTK trees for use in evaluation.
pos_tags : `List[str]`, optional.
The POS tags for the sentence. These can be used in the
model as embedded features, but they are passed here
in addition for use in constructing the tree.
pos_tags : `torch.LongTensor`, optional (default = None)
The output of a `SequenceLabelField` containing POS tags.
span_labels : `torch.LongTensor`, optional (default = None)
A torch tensor representing the integer gold class labels for all possible
spans, of shape `(batch_size, num_spans)`.
# Returns
An output dictionary consisting of:
class_probabilities : `torch.FloatTensor`
A tensor of shape `(batch_size, num_spans, span_label_vocab_size)`
representing a distribution over the label classes per span.
spans : `torch.LongTensor`
The original spans tensor.
tokens : `List[List[str]]`, required.
A list of tokens in the sentence for each element in the batch.
pos_tags : `List[List[str]]`, required.
A list of POS tags in the sentence for each element in the batch.
num_spans : `torch.LongTensor`, required.
A tensor of shape (batch_size), representing the lengths of non-padded spans
in `enumerated_spans`.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
embedded_text_input = self.text_field_embedder(tokens)
if pos_tags is not None and self.pos_tag_embedding is not None:
embedded_pos_tags = self.pos_tag_embedding(pos_tags)
embedded_text_input = torch.cat([embedded_text_input, embedded_pos_tags], -1)
elif self.pos_tag_embedding is not None:
raise ConfigurationError("Model uses a POS embedding, but no POS tags were passed.")
mask = get_text_field_mask(tokens)
# Looking at the span start index is enough to know if
# this is padding or not. Shape: (batch_size, num_spans)
span_mask = (spans[:, :, 0] >= 0).squeeze(-1).long()
if span_mask.dim() == 1:
# This happens if you use batch_size 1 and encounter
# a length 1 sentence in PTB, which do exist. -.-
span_mask = span_mask.unsqueeze(-1)
if span_labels is not None and span_labels.dim() == 1:
span_labels = span_labels.unsqueeze(-1)
num_spans = get_lengths_from_binary_sequence_mask(span_mask)
encoded_text = self.encoder(embedded_text_input, mask)
span_representations = self.span_extractor(encoded_text, spans, mask, span_mask)
if self.feedforward_layer is not None:
span_representations = self.feedforward_layer(span_representations)
logits = self.tag_projection_layer(span_representations)
class_probabilities = masked_softmax(logits, span_mask.unsqueeze(-1))
output_dict = {
"class_probabilities": class_probabilities,
"spans": spans,
"tokens": [meta["tokens"] for meta in metadata],
"pos_tags": [meta.get("pos_tags") for meta in metadata],
"num_spans": num_spans,
}
if span_labels is not None:
loss = sequence_cross_entropy_with_logits(logits, span_labels, span_mask)
self.tag_accuracy(class_probabilities, span_labels, span_mask)
output_dict["loss"] = loss
# The evalb score is expensive to compute, so we only compute
# it for the validation and test sets.
batch_gold_trees = [meta.get("gold_tree") for meta in metadata]
if all(batch_gold_trees) and self._evalb_score is not None and not self.training:
gold_pos_tags: List[List[str]] = [
list(zip(*tree.pos()))[1] for tree in batch_gold_trees
]
predicted_trees = self.construct_trees(
class_probabilities.cpu().data,
spans.cpu().data,
num_spans.data,
output_dict["tokens"],
gold_pos_tags,
)
self._evalb_score(predicted_trees, batch_gold_trees)
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Constructs an NLTK `Tree` given the scored spans. We also switch to exclusive
span ends when constructing the tree representation, because it makes indexing
into lists cleaner for ranges of text, rather than individual indices.
Finally, for batch prediction, we will have padded spans and class probabilities.
In order to make this less confusing, we remove all the padded spans and
distributions from `spans` and `class_probabilities` respectively.
"""
all_predictions = output_dict["class_probabilities"].cpu().data
all_spans = output_dict["spans"].cpu().data
all_sentences = output_dict["tokens"]
all_pos_tags = output_dict["pos_tags"] if all(output_dict["pos_tags"]) else None
num_spans = output_dict["num_spans"].data
trees = self.construct_trees(
all_predictions, all_spans, num_spans, all_sentences, all_pos_tags
)
batch_size = all_predictions.size(0)
output_dict["spans"] = [all_spans[i, : num_spans[i]] for i in range(batch_size)]
output_dict["class_probabilities"] = [
all_predictions[i, : num_spans[i], :] for i in range(batch_size)
]
output_dict["trees"] = trees
return output_dict
def construct_trees(
self,
predictions: torch.FloatTensor,
all_spans: torch.LongTensor,
num_spans: torch.LongTensor,
sentences: List[List[str]],
pos_tags: List[List[str]] = None,
) -> List[Tree]:
"""
Construct `nltk.Tree`'s for each batch element by greedily nesting spans.
The trees use exclusive end indices, which contrasts with how spans are
represented in the rest of the model.
# Parameters
predictions : `torch.FloatTensor`, required.
A tensor of shape `(batch_size, num_spans, span_label_vocab_size)`
representing a distribution over the label classes per span.
all_spans : `torch.LongTensor`, required.
A tensor of shape (batch_size, num_spans, 2), representing the span
indices we scored.
num_spans : `torch.LongTensor`, required.
A tensor of shape (batch_size), representing the lengths of non-padded spans
in `enumerated_spans`.
sentences : `List[List[str]]`, required.
A list of tokens in the sentence for each element in the batch.
pos_tags : `List[List[str]]`, optional (default = None).
A list of POS tags for each word in the sentence for each element
in the batch.
# Returns
A `List[Tree]` containing the decoded trees for each element in the batch.
"""
# Switch to using exclusive end spans.
exclusive_end_spans = all_spans.clone()
exclusive_end_spans[:, :, -1] += 1
no_label_id = self.vocab.get_token_index("NO-LABEL", "labels")
trees: List[Tree] = []
for batch_index, (scored_spans, spans, sentence) in enumerate(
zip(predictions, exclusive_end_spans, sentences)
):
selected_spans = []
for prediction, span in zip(
scored_spans[: num_spans[batch_index]], spans[: num_spans[batch_index]]
):
start, end = span
no_label_prob = prediction[no_label_id]
label_prob, label_index = torch.max(prediction, -1)
# Does the span have a label != NO-LABEL or is it the root node?
# If so, include it in the spans that we consider.
if int(label_index) != no_label_id or (start == 0 and end == len(sentence)):
selected_spans.append(
SpanInformation(
start=int(start),
end=int(end),
label_prob=float(label_prob),
no_label_prob=float(no_label_prob),
label_index=int(label_index),
)
)
# The spans we've selected might overlap, which causes problems when we try
# to construct the tree as they won't nest properly.
consistent_spans = self.resolve_overlap_conflicts_greedily(selected_spans)
spans_to_labels = {
(span.start, span.end): self.vocab.get_token_from_index(span.label_index, "labels")
for span in consistent_spans
}
sentence_pos = pos_tags[batch_index] if pos_tags is not None else None
trees.append(self.construct_tree_from_spans(spans_to_labels, sentence, sentence_pos))
return trees
@staticmethod
def resolve_overlap_conflicts_greedily(spans: List[SpanInformation]) -> List[SpanInformation]:
"""
Given a set of spans, removes spans which overlap by evaluating the difference
in probability between one being labeled and the other explicitly having no label
and vice-versa. The worst case time complexity of this method is `O(k * n^4)` where `n`
is the length of the sentence that the spans were enumerated from (and therefore
`k * m^2` complexity with respect to the number of spans `m`) and `k` is the
number of conflicts. However, in practice, there are very few conflicts. Hopefully.
This function modifies `spans` to remove overlapping spans.
# Parameters
spans : `List[SpanInformation]`, required.
A list of spans, where each span is a `namedtuple` containing the
following attributes:
start : `int`
The start index of the span.
end : `int`
The exclusive end index of the span.
no_label_prob : `float`
The probability of this span being assigned the `NO-LABEL` label.
label_prob : `float`
The probability of the most likely label.
# Returns
A modified list of `spans`, with the conflicts resolved by considering local
differences between pairs of spans and removing one of the two spans.
"""
conflicts_exist = True
while conflicts_exist:
conflicts_exist = False
for span1_index, span1 in enumerate(spans):
for span2_index, span2 in list(enumerate(spans))[span1_index + 1 :]:
if (
span1.start < span2.start < span1.end < span2.end
or span2.start < span1.start < span2.end < span1.end
):
# The spans overlap.
conflicts_exist = True
# What's the more likely situation: that span2 was labeled
# and span1 was unlabeled, or that span1 was labeled and span2
# was unlabeled? In the first case, we delete span2 from the
# set of spans to form the tree - in the second case, we delete
# span1.
if (
span1.no_label_prob + span2.label_prob
< span2.no_label_prob + span1.label_prob
):
spans.pop(span2_index)
else:
spans.pop(span1_index)
break
return spans
@staticmethod
def construct_tree_from_spans(
spans_to_labels: Dict[Tuple[int, int], str], sentence: List[str], pos_tags: List[str] = None
) -> Tree:
"""
# Parameters
spans_to_labels : `Dict[Tuple[int, int], str]`, required.
A mapping from spans to constituency labels.
sentence : `List[str]`, required.
A list of tokens forming the sentence to be parsed.
pos_tags : `List[str]`, optional (default = None)
A list of the pos tags for the words in the sentence, if they
were either predicted or taken as input to the model.
# Returns
An `nltk.Tree` constructed from the labelled spans.
"""
def assemble_subtree(start: int, end: int):
if (start, end) in spans_to_labels:
# Some labels contain nested spans, e.g S-VP.
# We actually want to create (S (VP ...)) nodes
# for these labels, so we split them up here.
labels: List[str] = spans_to_labels[(start, end)].split("-")
else:
labels = None
# This node is a leaf.
if end - start == 1:
word = sentence[start]
pos_tag = pos_tags[start] if pos_tags is not None else "XX"
tree = Tree(pos_tag, [word])
if labels is not None and pos_tags is not None:
# If POS tags were passed explicitly,
# they are added as pre-terminal nodes.
while labels:
tree = Tree(labels.pop(), [tree])
elif labels is not None:
# Otherwise, we didn't want POS tags
# at all.
tree = Tree(labels.pop(), [word])
while labels:
tree = Tree(labels.pop(), [tree])
return [tree]
argmax_split = start + 1
# Find the next largest subspan such that
# the left hand side is a constituent.
for split in range(end - 1, start, -1):
if (start, split) in spans_to_labels:
argmax_split = split
break
left_trees = assemble_subtree(start, argmax_split)
right_trees = assemble_subtree(argmax_split, end)
children = left_trees + right_trees
if labels is not None:
while labels:
children = [Tree(labels.pop(), children)]
return children
tree = assemble_subtree(0, len(sentence))
return tree[0]
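# Illustrative sketch (hedged, hypothetical sentence): with sentence ["the", "dog", "barks"],
# no POS tags, and spans_to_labels == {(0, 3): "S", (0, 2): "NP", (2, 3): "VP"}, the method
# above assembles (S (NP (XX the) (XX dog)) (VP barks)); labelled length-1 spans are not
# wrapped in "XX" pre-terminals unless POS tags are supplied.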
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics = {}
all_metrics["tag_accuracy"] = self.tag_accuracy.get_metric(reset=reset)
if self._evalb_score is not None:
evalb_metrics = self._evalb_score.get_metric(reset=reset)
all_metrics.update(evalb_metrics)
return all_metrics
| [
"torch.nn.modules.linear.Linear",
"torch.cat",
"torch.max"
] | 1.2.0 | justindujardin/allennlp | c4559f3751775aa8bc018db417edc119d29d8051 |
1.2 | """
AllenNLP just uses
`PyTorch optimizers <https://pytorch.org/docs/master/optim.html>`_ ,
with a thin wrapper to allow registering them and instantiating them `from_params`.
The available optimizers are
* `"adadelta" <https://pytorch.org/docs/master/optim.html#torch.optim.Adadelta>`_
* `"adagrad" <https://pytorch.org/docs/master/optim.html#torch.optim.Adagrad>`_
* `"adam" <https://pytorch.org/docs/master/optim.html#torch.optim.Adam>`_
* `"adamw" <https://pytorch.org/docs/master/optim.html#torch.optim.AdamW>`_
* `"huggingface_adamw"
<https://huggingface.co/transformers/main_classes/optimizer_schedules.html#transformers.AdamW>`_
* `"sparse_adam" <https://pytorch.org/docs/master/optim.html#torch.optim.SparseAdam>`_
* `"sgd" <https://pytorch.org/docs/master/optim.html#torch.optim.SGD>`_
* `"rmsprop <https://pytorch.org/docs/master/optim.html#torch.optim.RMSprop>`_
* `"adamax <https://pytorch.org/docs/master/optim.html#torch.optim.Adamax>`_
* `"averaged_sgd <https://pytorch.org/docs/master/optim.html#torch.optim.ASGD>`_
"""
import logging
import re
import math
from typing import Any, Dict, List, Tuple, Union
import torch
import transformers
from allennlp.common import Params, Registrable
logger = logging.getLogger(__name__)
def make_parameter_groups(
model_parameters: List[Tuple[str, torch.nn.Parameter]],
groups: List[Tuple[List[str], Dict[str, Any]]] = None,
) -> Union[List[Dict[str, Any]], List[torch.nn.Parameter]]:
"""
Takes a list of model parameters with associated names (typically coming from something like
`model.parameters`), along with a grouping (as specified below), and prepares them to be passed
to the `__init__` function of a `torch.Optimizer`. This means separating the parameters into
groups with the given regexes, and prepping whatever keyword arguments are given for those
regexes in `groups`.
`groups` contains something like:
```
[
(["regex1", "regex2"], {"lr": 1e-3}),
(["regex3"], {"lr": 1e-4})
]
```
The return value is in the right format to be passed directly as the `params` argument to a pytorch
`Optimizer`. If there are multiple groups specified, this is a list of dictionaries, where each
dict contains a "parameter group" and group-specific options, e.g., {'params': [list of
parameters], 'lr': 1e-3, ...}. Any config option not specified in the additional options (e.g.
for the default group) is inherited from the top level arguments given in the constructor. See:
https://pytorch.org/docs/0.3.0/optim.html?#per-parameter-options. See also our
`test_optimizer_parameter_groups` test for an example of how this works in this code.
The dictionary's return type is labeled as `Any`, because it can be a `List[torch.nn.Parameter]`
(for the "params" key), or anything else (typically a float) for the other keys.
"""
if groups:
# In addition to any parameters that match group specific regex,
# we also need a group for the remaining "default" group.
# Those will be included in the last entry of parameter_groups.
parameter_groups: Union[List[Dict[str, Any]], List[torch.nn.Parameter]] = [
{"params": []} for _ in range(len(groups) + 1)
]
# add the group specific kwargs
for k in range(len(groups)):
parameter_groups[k].update(groups[k][1])
regex_use_counts: Dict[str, int] = {}
parameter_group_names: List[set] = [set() for _ in range(len(groups) + 1)]
for name, param in model_parameters:
# Determine the group for this parameter.
group_index = None
for k, group_regexes in enumerate(groups):
for regex in group_regexes[0]:
if regex not in regex_use_counts:
regex_use_counts[regex] = 0
if re.search(regex, name):
if group_index is not None and group_index != k:
raise ValueError(
"{} was specified in two separate parameter groups".format(name)
)
group_index = k
regex_use_counts[regex] += 1
if group_index is not None:
parameter_groups[group_index]["params"].append(param)
parameter_group_names[group_index].add(name)
else:
# the default group
parameter_groups[-1]["params"].append(param)
parameter_group_names[-1].add(name)
# log the parameter groups
logger.info("Done constructing parameter groups.")
for k in range(len(groups) + 1):
group_options = {
key: val for key, val in parameter_groups[k].items() if key != "params"
}
logger.info("Group %s: %s, %s", k, list(parameter_group_names[k]), group_options)
# check for unused regex
for regex, count in regex_use_counts.items():
if count == 0:
logger.warning(
"When constructing parameter groups, %s does not match any parameter name",
regex,
)
else:
parameter_groups = [param for name, param in model_parameters]
# Log the number of parameters to optimize
num_parameters = 0
for parameter_group in parameter_groups:
if isinstance(parameter_group, dict):
num_parameters += sum(parameter.numel() for parameter in parameter_group["params"])
else:
num_parameters += parameter_group.numel() # type: ignore
logger.info("Number of trainable parameters: %s", num_parameters)
return parameter_groups
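# Illustrative sketch (hedged, hypothetical parameter names): for a model exposing parameters
# named "encoder.weight", "encoder.bias" and "classifier.weight",
#
#     make_parameter_groups(
#         list(model.named_parameters()),
#         groups=[(["encoder.*"], {"lr": 1e-5})],
#     )
#
# returns two groups: the encoder parameters with lr=1e-5, plus a default group holding
# "classifier.weight" that inherits the optimizer's top-level options.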
class Optimizer(Registrable):
"""
This class just allows us to implement `Registrable` for Pytorch Optimizers. We do something a
little bit different with `Optimizers`, because they are implemented as classes in PyTorch, and
we want to use those classes. To make things easy, we just inherit from those classes, using
multiple inheritance to also inherit from `Optimizer`. The only reason we do this is to make
type inference on parameters possible, so we can construct these objects using our configuration
framework. If you are writing your own script, you can safely ignore these classes and just use
the `torch.optim` classes directly.
If you are implementing one of these classes, the `model_parameters` and `parameter_groups`
arguments to `__init__` are important, and should always be present. The trainer will pass
the trainable parameters in the model to the optimizer using the name `model_parameters`, so if
you use a different name, your code will crash. Nothing will technically crash if you use a
name other than `parameter_groups` for your second argument, it will just be annoyingly
inconsistent.
"""
default_implementation = "adam"
@staticmethod
def default(model_parameters: List) -> "Optimizer":
return Optimizer.from_params(model_parameters=model_parameters, params=Params({}))
@Optimizer.register("adam")
class AdamOptimizer(Optimizer, torch.optim.Adam):
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
weight_decay: float = 0.0,
amsgrad: bool = False,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad,
)
@Optimizer.register("sparse_adam")
class SparseAdamOptimizer(Optimizer, torch.optim.SparseAdam):
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
betas=betas,
eps=eps,
)
@Optimizer.register("adamax")
class AdamaxOptimizer(Optimizer, torch.optim.Adamax):
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.002,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
weight_decay: float = 0.0,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
)
@Optimizer.register("adamw")
class AdamWOptimizer(Optimizer, torch.optim.AdamW):
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
weight_decay: float = 0.01,
amsgrad: bool = False,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad,
)
@Optimizer.register("huggingface_adamw")
class HuggingfaceAdamWOptimizer(Optimizer, transformers.AdamW):
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-06,
weight_decay: float = 0.0,
correct_bias: bool = False,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
correct_bias=correct_bias,
)
@Optimizer.register("adagrad")
class AdagradOptimizer(Optimizer, torch.optim.Adagrad):
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.01,
lr_decay: float = 0.0,
weight_decay: float = 0.0,
initial_accumulator_value: float = 0.0,
eps: float = 1e-10,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
lr_decay=lr_decay,
weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value,
eps=eps,
)
@Optimizer.register("adadelta")
class AdadeltaOptimizer(Optimizer, torch.optim.Adadelta):
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 1.0,
rho: float = 0.9,
eps: float = 1e-06,
weight_decay: float = 0.0,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
rho=rho,
eps=eps,
weight_decay=weight_decay,
)
@Optimizer.register("sgd")
class SgdOptimizer(Optimizer, torch.optim.SGD):
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
lr: float,
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
momentum: float = 0.0,
dampening: float = 0,
weight_decay: float = 0.0,
nesterov: bool = False,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
)
@Optimizer.register("rmsprop")
class RmsPropOptimizer(Optimizer, torch.optim.RMSprop):
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.01,
alpha: float = 0.99,
eps: float = 1e-08,
weight_decay: float = 0.0,
momentum: float = 0.0,
centered: bool = False,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
alpha=alpha,
eps=eps,
weight_decay=weight_decay,
momentum=momentum,
centered=centered,
)
@Optimizer.register("averaged_sgd")
class AveragedSgdOptimizer(Optimizer, torch.optim.ASGD):
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr: float = 0.01,
lambd: float = 0.0001,
alpha: float = 0.75,
t0: float = 1000000.0,
weight_decay: float = 0.0,
):
super().__init__(
params=make_parameter_groups(model_parameters, parameter_groups),
lr=lr,
lambd=lambd,
alpha=alpha,
t0=t0,
weight_decay=weight_decay,
)
@Optimizer.register("dense_sparse_adam")
class DenseSparseAdam(Optimizer, torch.optim.Optimizer):
"""
NOTE: This class has been copied verbatim from the separate Dense and
Sparse versions of Adam in Pytorch.
Implements Adam algorithm with dense & sparse gradients.
It has been proposed in Adam: A Method for Stochastic Optimization.
# Parameters
params : `iterable`
iterable of parameters to optimize or dicts defining parameter groups
lr : `float`, optional (default: 1e-3)
The learning rate.
betas : `Tuple[float, float]`, optional (default: (0.9, 0.999))
coefficients used for computing running averages of gradient
and its square.
eps : `float`, optional, (default: 1e-8)
A term added to the denominator to improve numerical stability.
"""
def __init__(
self,
model_parameters: List[Tuple[str, torch.nn.Parameter]],
parameter_groups: List[Tuple[List[str], Dict[str, Any]]] = None,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps)
super().__init__(make_parameter_groups(model_parameters, parameter_groups), defaults)
def step(self, closure=None):
"""
Performs a single optimization step.
# Parameters
closure : `callable`, optional.
A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
state["step"] += 1
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
if grad.is_sparse:
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
# Decay the first and second moment running average coefficient
# old <- b * old + (1 - b) * new
# <==> old += (1 - b) * (new - old)
old_exp_avg_values = exp_avg.sparse_mask(grad)._values()
exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
exp_avg.add_(make_sparse(exp_avg_update_values))
old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()
exp_avg_sq_update_values = (
grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
)
exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))
# Dense addition again is intended, avoiding another sparse_mask
numer = exp_avg_update_values.add_(old_exp_avg_values)
exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
denom = exp_avg_sq_update_values.sqrt_().add_(group["eps"])
del exp_avg_update_values, exp_avg_sq_update_values
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
p.data.add_(make_sparse(-step_size * numer.div_(denom)))
else:
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(1 - beta1, grad)
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
denom = exp_avg_sq.sqrt().add_(group["eps"])
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(-step_size, exp_avg, denom)
return loss
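# Illustrative sketch (not part of the original file): how the registry above is
# typically exercised. A tiny torch.nn.Linear stands in for a real model, and the
# Params dict mirrors what the configuration framework would pass in; the "type"
# key picks the registered optimizer and the remaining keys go to its __init__.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    model_parameters = [(n, p) for n, p in model.named_parameters() if p.requires_grad]
    optimizer = Optimizer.from_params(
        model_parameters=model_parameters,
        params=Params({"type": "adamw", "lr": 1e-3, "weight_decay": 0.01}),
    )
    print(type(optimizer).__name__)  # expected: AdamWOptimizer (registered under "adamw")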
| [
"torch.zeros_like"
] | 1.2.0 | justindujardin/allennlp | c4559f3751775aa8bc018db417edc119d29d8051 |
1.7 | from typing import Union, List
import torch
from torch import nn as nn
from torch.nn import functional as F
from models.layers.create_act import get_act_layer
from .trace_utils import _assert
class BatchNormAct2d(nn.BatchNorm2d):
"""BatchNorm + Activation
This module performs BatchNorm + Activation in a manner that will remain backwards
compatible with weights trained with separate bn, act. This is why we inherit from BN
instead of composing it as a .bn member.
"""
def __init__(
self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True,
apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):
super(BatchNormAct2d, self).__init__(
num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats)
self.drop = drop_layer() if drop_layer is not None else nn.Identity()
act_layer = get_act_layer(act_layer) # string -> nn.Module
if act_layer is not None and apply_act:
act_args = dict(inplace=True) if inplace else {}
self.act = act_layer(**act_args)
else:
self.act = nn.Identity()
def forward(self, x):
# cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing
_assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)')
# exponential_average_factor is set to self.momentum
# (when it is available) only so that it gets updated
# in ONNX graph when this node is exported to ONNX.
if self.momentum is None:
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
if self.training and self.track_running_stats:
# TODO: if statement only here to tell the jit to skip emitting this when it is None
if self.num_batches_tracked is not None: # type: ignore[has-type]
self.num_batches_tracked = self.num_batches_tracked + \
1 # type: ignore[has-type]
if self.momentum is None: # use cumulative moving average
exponential_average_factor = 1.0 / \
float(self.num_batches_tracked)
else: # use exponential moving average
exponential_average_factor = self.momentum
r"""
Decide whether the mini-batch stats should be used for normalization rather than the buffers.
Mini-batch stats are used in training mode, and in eval mode when buffers are None.
"""
if self.training:
bn_training = True
else:
bn_training = (self.running_mean is None) and (
self.running_var is None)
r"""
Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be
passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are
used for normalization (i.e. in eval mode when buffers are not None).
"""
x = F.batch_norm(
x,
# If buffers are not to be tracked, ensure that they won't be updated
self.running_mean if not self.training or self.track_running_stats else None,
self.running_var if not self.training or self.track_running_stats else None,
self.weight,
self.bias,
bn_training,
exponential_average_factor,
self.eps,
)
x = self.drop(x)
x = self.act(x)
return x
def _num_groups(num_channels, num_groups, group_size):
if group_size:
assert num_channels % group_size == 0
return num_channels // group_size
return num_groups
class GroupNormAct(nn.GroupNorm):
# NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args
def __init__(
self, num_channels, num_groups=32, eps=1e-5, affine=True, group_size=None,
apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):
super(GroupNormAct, self).__init__(
_num_groups(num_channels, num_groups, group_size), num_channels, eps=eps, affine=affine)
self.drop = drop_layer() if drop_layer is not None else nn.Identity()
act_layer = get_act_layer(act_layer) # string -> nn.Module
if act_layer is not None and apply_act:
act_args = dict(inplace=True) if inplace else {}
self.act = act_layer(**act_args)
else:
self.act = nn.Identity()
def forward(self, x):
x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps)
x = self.drop(x)
x = self.act(x)
return x
class LayerNormAct(nn.LayerNorm):
def __init__(
self, normalization_shape: Union[int, List[int], torch.Size], eps=1e-5, affine=True,
apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):
super(LayerNormAct, self).__init__(
normalization_shape, eps=eps, elementwise_affine=affine)
self.drop = drop_layer() if drop_layer is not None else nn.Identity()
act_layer = get_act_layer(act_layer) # string -> nn.Module
if act_layer is not None and apply_act:
act_args = dict(inplace=True) if inplace else {}
self.act = act_layer(**act_args)
else:
self.act = nn.Identity()
def forward(self, x):
x = F.layer_norm(x, self.normalized_shape,
self.weight, self.bias, self.eps)
x = self.drop(x)
x = self.act(x)
return x
class LayerNormAct2d(nn.LayerNorm):
def __init__(
self, num_channels, eps=1e-5, affine=True,
apply_act=True, act_layer=nn.ReLU, inplace=True, drop_layer=None):
super(LayerNormAct2d, self).__init__(
num_channels, eps=eps, elementwise_affine=affine)
self.drop = drop_layer() if drop_layer is not None else nn.Identity()
act_layer = get_act_layer(act_layer) # string -> nn.Module
if act_layer is not None and apply_act:
act_args = dict(inplace=True) if inplace else {}
self.act = act_layer(**act_args)
else:
self.act = nn.Identity()
def forward(self, x):
x = F.layer_norm(
x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2)
x = self.drop(x)
x = self.act(x)
return x
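# Illustrative sketch (not part of the original file): the *Act layers above are
# drop-in replacements for a norm layer followed by an activation, so a
# conv -> bn -> relu block collapses into conv -> BatchNormAct2d.
if __name__ == "__main__":
    block = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False),
        BatchNormAct2d(16),  # BatchNorm2d + inplace ReLU in a single module
    )
    x = torch.randn(2, 3, 32, 32)
    print(block(x).shape)  # torch.Size([2, 16, 32, 32])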
| [
"torch.nn.functional.batch_norm",
"torch.nn.Identity",
"torch.nn.functional.group_norm",
"torch.nn.functional.layer_norm"
] | 1.7.1 | hmthanh/LaTeX_OCR | bf5cf4642aff9cbbd5c4f8f232cd993a38ee6d81 |
0.4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
worker.py:
- Contains the definition of the ``Worker`` class, representing the base of the basic workers, such as \
``OnlineTrainer`` and ``Tester``.
"""
__author__ = "Vincent Marois, Tomasz Kornuta, Ryan L. McAvoy"
import os
import yaml
import torch
import logging
import logging.config
import argparse
import numpy as np
from random import randrange
from abc import abstractmethod
from torch.utils.data import DataLoader
from miprometheus.utils.sampler_factory import SamplerFactory
from miprometheus.problems.problem_factory import ProblemFactory
# Import utils.
from miprometheus.utils.app_state import AppState
from miprometheus.utils.param_interface import ParamInterface
class Worker(object):
"""
Base abstract class for the workers.
All base workers should subclass it and override the relevant methods.
"""
def __init__(self, name, add_default_parser_args = True):
"""
Base constructor for all workers:
- Initializes the AppState singleton:
>>> self.app_state = AppState()
- Initializes the Parameter Registry:
>>> self.params = ParamInterface()
- Defines the logger:
>>> self.logger = logging.getLogger(name=self.name)
- Creates parser and adds default worker command line arguments.
:param name: Name of the worker.
:type name: str
:param add_default_parser_args: If set, adds default parser arguments (DEFAULT: True).
:type add_default_parser_args: bool
"""
# Call base constructor.
super(Worker, self).__init__()
# Set worker name.
self.name = name
# Initialize the application state singleton.
self.app_state = AppState()
# Initialize parameter interface/registry.
self.params = ParamInterface()
# Initialize logger using the configuration.
self.initialize_logger()
# Create parser with a list of runtime arguments.
self.parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
# Add arguments to the specific parser.
if add_default_parser_args:
# These arguments will be shared by all basic workers.
self.parser.add_argument('--config',
dest='config',
type=str,
default='',
help='Name of the configuration file(s) to be loaded. '
'If specifying more than one file, they must be separated with a comma ",".')
self.parser.add_argument('--model',
type=str,
default='',
dest='model',
help='Path to the file containing the saved parameters'
' of the model to load (model checkpoint, should end with a .pt extension.)')
self.parser.add_argument('--gpu',
dest='use_gpu',
action='store_true',
help='The current worker will move the computations on GPU devices, if available '
'in the system. (Default: False)')
self.parser.add_argument('--expdir',
dest='expdir',
type=str,
default="./experiments",
help='Path to the directory where the experiment(s) folders are/will be stored.'
' (DEFAULT: ./experiments)')
self.parser.add_argument('--savetag',
dest='savetag',
type=str,
default='',
help='Tag for the save directory.')
self.parser.add_argument('--ll',
action='store',
dest='log_level',
type=str,
default='INFO',
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'],
help="Log level. (Default: INFO)")
self.parser.add_argument('--li',
dest='logging_interval',
default=100,
type=int,
help='Statistics logging interval. Will impact logging to the logger and '
'exporting to TensorBoard. Writing to the csv file is not impacted '
'(interval of 1).(Default: 100, i.e. logs every 100 episodes).')
self.parser.add_argument('--agree',
dest='confirm',
action='store_true',
help='Request user confirmation just after loading the settings, '
'before starting training. (Default: False)')
def initialize_logger(self):
"""
Initializes the logger, with a specific configuration:
>>> logger_config = {'version': 1,
>>> 'disable_existing_loggers': False,
>>> 'formatters': {
>>> 'simple': {
>>> 'format': '[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',
>>> 'datefmt': '%Y-%m-%d %H:%M:%S'}},
>>> 'handlers': {
>>> 'console': {
>>> 'class': 'logging.StreamHandler',
>>> 'level': 'INFO',
>>> 'formatter': 'simple',
>>> 'stream': 'ext://sys.stdout'}},
>>> 'root': {'level': 'DEBUG',
>>> 'handlers': ['console']}}
"""
# Load the default logger configuration.
logger_config = {'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'}},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'simple',
'stream': 'ext://sys.stdout'}},
'root': {'level': 'DEBUG',
'handlers': ['console']}}
logging.config.dictConfig(logger_config)
# Create the Logger, set its label and logging level.
self.logger = logging.getLogger(name=self.name)
def display_parsing_results(self):
"""
Displays the properly & improperly parsed arguments (if any).
"""
# Log the parsed flags.
flags_str = 'Properly parsed command line arguments: \n'
flags_str += '='*80 + '\n'
for arg in vars(self.flags):
flags_str += "{}= {} \n".format(arg, getattr(self.flags, arg))
flags_str += '='*80 + '\n'
self.logger.info(flags_str)
# Log the unparsed flags if any.
if self.unparsed:
flags_str = 'Invalid command line arguments: \n'
flags_str += '='*80 + '\n'
for arg in self.unparsed:
flags_str += "{} \n".format(arg)
flags_str += '='*80 + '\n'
self.logger.warning(flags_str)
def setup_experiment(self):
"""
Sets up a specific experiment.
Base method:
- Parses command line arguments.
- Sets the 3 default sections (training / validation / test) and sets their dataloaders params.
.. note::
Child classes should override this method, but still call its parent to draw the basic functionality \
implemented here.
"""
# Parse arguments.
self.flags, self.unparsed = self.parser.parse_known_args()
# Set logger depending on the settings.
self.logger.setLevel(getattr(logging, self.flags.log_level.upper(), None))
# add empty sections
self.params.add_default_params({"training": {'terminal_conditions': {}}})
self.params.add_default_params({"validation": {}})
self.params.add_default_params({"testing": {}})
# set a default configuration section for the DataLoaders
dataloader_config = {'dataloader': {'shuffle': True, # shuffle set by default.
'batch_sampler': None,
'num_workers': 0, # Do not use multiprocessing by default - for now.
'pin_memory': False,
'drop_last': False,
'timeout': 0},
'sampler': {}, # not using sampler by default
}
self.params["training"].add_default_params(dataloader_config)
self.params["validation"].add_default_params(dataloader_config)
self.params["testing"].add_default_params(dataloader_config)
def build_problem_sampler_loader(self, params, section_name):
"""
Builds and returns the Problem class, alongside its DataLoader.
Also builds the sampler if required.
:param params: 'ParamInterface' object, referring to one of main sections (training/validation/testing).
:type params: miprometheus.utils.ParamInterface
:param section_name: name of the section that will be used by logger for display.
:return: Problem instance & DataLoader instance.
"""
# Build the problem.
problem = ProblemFactory.build(params['problem'])
# Try to build the sampler.
sampler = SamplerFactory.build(problem, params['sampler'])
if sampler is not None:
# Set shuffle to False - REQUIRED as those two are exclusive.
params['dataloader'].add_config_params({'shuffle': False})
# build the DataLoader on top of the given problem
loader = DataLoader(dataset=problem,
batch_size=params['problem']['batch_size'],
shuffle=params['dataloader']['shuffle'],
sampler=sampler,
batch_sampler=params['dataloader']['batch_sampler'],
num_workers=params['dataloader']['num_workers'],
collate_fn=problem.collate_fn,
pin_memory=params['dataloader']['pin_memory'],
drop_last=params['dataloader']['drop_last'],
timeout=params['dataloader']['timeout'],
worker_init_fn=problem.worker_init_fn)
# Display sizes.
self.logger.info("Problem for '{}' loaded (size: {})".format(section_name, len(problem)))
if (sampler is not None):
self.logger.info("Sampler for '{}' created (size: {})".format(section_name, len(sampler)))
# Return sampler - even if it is none :]
return problem, sampler, loader
def get_epoch_size(self, problem, sampler, batch_size, drop_last):
"""
Compute the number of iterations ('episodes') to run given the size of the dataset and the batch size to cover
the entire dataset once.
Takes into account whether one used sampler or not.
:param problem: Object derived from the ''Problem'' class
:param sampler: Sampler (may be None)
:param batch_size: Batch size.
:type batch_size: int
:param drop_last: If True then last batch (if incomplete) will not be counted
:type drop_last: bool
.. note::
If the last batch is incomplete, it is counted in only when ``drop_last`` in ``DataLoader()`` is set to False.
.. warning::
Leaving this method 'just in case', in most cases one might simply use ''len(dataloader)''.
:return: Number of iterations to perform to go though the entire dataset once.
"""
# "Estimate" dataset size.
if (sampler is not None):
problem_size = len(sampler)
else:
problem_size = len(problem)
# If problem_size is a multiple of batch_size OR drop_last is set.
if (problem_size % batch_size) == 0 or drop_last:
return problem_size // batch_size
else:
return (problem_size // batch_size) + 1
def export_experiment_configuration(self, log_dir, filename, user_confirm):
"""
Dumps the configuration to ``yaml`` file.
:param log_dir: Directory used to host log files (such as the collected statistics).
:type log_dir: str
:param filename: Name of the ``yaml`` file to write to.
:type filename: str
:param user_confirm: Whether to request user confirmation.
:type user_confirm: bool
"""
# -> At this point, all configuration for experiment is complete.
# Display results of parsing.
self.display_parsing_results()
# Log the resulting training configuration.
conf_str = 'Final parameter registry configuration:\n'
conf_str += '='*80 + '\n'
conf_str += yaml.safe_dump(self.params.to_dict(), default_flow_style=False)
conf_str += '='*80 + '\n'
self.logger.info(conf_str)
# Save the resulting configuration into a .yaml settings file, under log_dir
with open(log_dir + filename, 'w') as yaml_backup_file:
yaml.dump(self.params.to_dict(), yaml_backup_file, default_flow_style=False)
# Ask for confirmation - optional.
if user_confirm:
try:
input('Press <Enter> to confirm and start the experiment\n')
except KeyboardInterrupt:
exit(0)
def add_statistics(self, stat_col):
"""
Adds most elementary shared statistics to ``StatisticsCollector``: episode and loss.
:param stat_col: ``StatisticsCollector``.
"""
# Add default statistics with formatting.
stat_col.add_statistic('loss', '{:12.10f}')
stat_col.add_statistic('episode', '{:06d}')
def add_aggregators(self, stat_agg):
"""
Adds basic statistical aggregators to ``StatisticsAggregator``: episode, \
episodes_aggregated and loss derivatives.
:param stat_agg: ``StatisticsAggregator``.
"""
# add 'aggregators' for the episode.
stat_agg.add_aggregator('episode', '{:06d}')
# Number of aggregated episodes.
stat_agg.add_aggregator('episodes_aggregated', '{:06d}')
# Add default statistical aggregators for the loss (indicating a formatting).
# Represents the average loss, but staying with 'loss' for TensorBoard "variable compatibility".
stat_agg.add_aggregator('loss', '{:12.10f}')
stat_agg.add_aggregator('loss_min', '{:12.10f}')
stat_agg.add_aggregator('loss_max', '{:12.10f}')
stat_agg.add_aggregator('loss_std', '{:12.10f}')
def aggregate_statistics(self, stat_col, stat_agg):
"""
Aggregates the default statistics collected by the ``StatisticsCollector``.
.. note::
Only computes the min, max, mean, std of the loss as these are basic statistical aggregator by default.
Given that the ``StatisticsAggregator`` uses the statistics collected by the ``StatisticsCollector``, \
It should be ensured that these statistics are correctly collected (i.e. use of ``self.add_statistics()`` \
and ``collect_statistics()``).
:param stat_col: ``StatisticsCollector``
:param stat_agg: ``StatisticsAggregator``
"""
# By default, copy the last value for all variables have matching names.
# (will work well for e.g. episode or epoch)
for k, v in stat_col.items():
if k in stat_agg.aggregators:
# Copy last collected value.
stat_agg.aggregators[k] = v[-1]
# Get loss values.
loss_values = stat_col['loss']
# Calculate default aggregates.
stat_agg.aggregators['loss'] = torch.mean(torch.tensor(loss_values))
stat_agg.aggregators['loss_min'] = min(loss_values)
stat_agg.aggregators['loss_max'] = max(loss_values)
stat_agg.aggregators['loss_std'] = 0.0 if len(loss_values) <= 1 else torch.std(torch.tensor(loss_values))
stat_agg.aggregators['episodes_aggregated'] = len(loss_values)
@abstractmethod
def run_experiment(self):
"""
Main function of the worker which executes a specific experiment.
.. note::
Abstract. Should be implemented in the subclasses.
"""
def add_file_handler_to_logger(self, logfile):
"""
Add a ``logging.FileHandler`` to the logger of the current ``Worker``.
Specifies a ``logging.Formatter``:
>>> logging.Formatter(fmt='[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',
>>> datefmt='%Y-%m-%d %H:%M:%S')
:param logfile: File used by the ``FileHandler``.
"""
# create file handler which logs even DEBUG messages
fh = logging.FileHandler(logfile)
# set logging level for this file
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter(fmt='[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
fh.setFormatter(formatter)
# add the handler to the logger
self.logger.addHandler(fh)
def recurrent_config_parse(self, configs: str, configs_parsed: list):
"""
Parses names of configuration files in a recursive manner, i.e. \
by looking for ``default_configs`` sections and trying to load and parse those \
files one by one.
:param configs: String containing names of configuration files (with paths), separated by commas.
:type configs: str
:param configs_parsed: Configurations that were already parsed (so we won't parse them many times).
:type configs_parsed: list
:return: list of parsed configuration files.
"""
# Split and remove spaces.
configs_to_parse = configs.replace(" ", "").split(',')
# Terminal condition.
while len(configs_to_parse) > 0:
# Get config.
config = configs_to_parse.pop(0)
# Skip empty names (left by loose commas).
if config == '':
continue
print("Info: Parsing the {} configuration file".format(config))
# Check if it was already loaded.
if config in configs_parsed:
print('Warning: Configuration file {} already parsed - skipping'.format(config))
continue
# Check if file exists.
if not os.path.isfile(config):
print('Error: Configuration file {} does not exist'.format(config))
exit(-1)
try:
# Open file and get parameter dictionary.
with open(config, 'r') as stream:
param_dict = yaml.safe_load(stream)
except yaml.YAMLError as e:
print("Error: Couldn't properly parse the {} configuration file".format(config))
print('yaml.YAMLERROR:', e)
exit(-1)
# Remember that we loaded that config.
configs_parsed.append(config)
# Check if there are any default configs to load.
if 'default_configs' in param_dict:
# If there are - recursion!
configs_parsed = self.recurrent_config_parse(
param_dict['default_configs'], configs_parsed)
# Done, return list of loaded configs.
return configs_parsed
def recurrent_config_load(self,configs_to_load):
for config in reversed(configs_to_load):
# Load params from YAML file.
self.params.add_config_params_from_yaml(config)
print('Loaded configuration from file {}'.format(config))
def check_and_set_cuda(self, use_gpu):
"""
Enables computations on CUDA if GPU is available.
Sets the default data types.
:param use_gpu: Command line flag indicating whether use GPU/CUDA or not.
"""
# Determine if GPU/CUDA is available.
if torch.cuda.is_available():
if use_gpu:
self.app_state.convert_cuda_types()
self.logger.info('Running computations on GPU using CUDA enabled')
elif use_gpu:
self.logger.warning('GPU flag is enabled but there are no available GPU devices, using CPU instead')
else:
self.logger.warning('GPU flag is disabled, using CPU.')
def predict_evaluate_collect(self, model, problem, data_dict, stat_col, episode, epoch=None):
"""
Function that performs the following:
- passes samples through the model,
- computes loss using the problem
- collects problem and model statistics,
:param model: trainable model.
:type model: ``models.model.Model`` or a subclass
:param problem: problem generating samples.
:type problem: ``problems.problem.problem`` or a subclass
:param data_dict: contains the batch of samples to pass to the model.
:type data_dict: ``DataDict``
:param stat_col: statistics collector used for logging accuracy etc.
:type stat_col: ``StatisticsCollector``
:param episode: current episode index
:type episode: int
:param epoch: current epoch index.
:type epoch: int, optional
:return:
- logits,
- loss
"""
# Convert to CUDA.
if self.app_state.use_CUDA:
data_dict = data_dict.cuda()
# Perform forward calculation.
logits = model(data_dict)
# Evaluate loss function.
loss = problem.evaluate_loss(data_dict, logits)
# Collect "elementary" statistics - episode and loss.
if ('epoch' in stat_col) and (epoch is not None):
stat_col['epoch'] = epoch
stat_col['episode'] = episode
# Collect loss as float.
stat_col['loss'] = loss.item()
# Collect other (potential) statistics from problem & model.
problem.collect_statistics(stat_col, data_dict, logits)
model.collect_statistics(stat_col, data_dict, logits)
# Return tuple: logits, loss.
return logits, loss
def export_statistics(self, stat_obj, tag='', export_to_log = True):
"""
Export the statistics/aggregations to logger, csv and TB.
:param stat_obj: ``StatisticsCollector`` or ``StatisticsAggregato`` object.
:param tag: Additional tag that will be added to string exported to logger, optional (DEFAULT = '').
:type tag: str
:param export_to_log: If True, exports statistics to logger (DEFAULT: True)
:type export_to_log: bool
"""
# Log to logger
if export_to_log:
self.logger.info(stat_obj.export_to_string(tag))
# Export to csv
stat_obj.export_to_csv()
# Export to TensorBoard.
stat_obj.export_to_tensorboard()
def aggregate_and_export_statistics(self, problem, model, stat_col, stat_agg, episode, tag='', export_to_log = True):
"""
Aggregates the collected statistics. Exports the aggregations to logger, csv and TB. \
Empties statistics collector for the next episode.
:param model: trainable model.
:type model: ``models.model.Model`` or a subclass
:param problem: problem generating samples.
:type problem: ``problems.problem.problem`` or a subclass
:param stat_col: ``StatisticsCollector`` object.
:param stat_agg: ``StatisticsAggregator`` object.
:param tag: Additional tag that will be added to string exported to logger, optional (DEFAULT = '').
:type tag: str
:param export_to_log: If True, exports statistics to logger (DEFAULT: True)
:type export_to_log: bool
"""
# Aggregate statistics.
self.aggregate_statistics(stat_col, stat_agg)
problem.aggregate_statistics(stat_col, stat_agg)
model.aggregate_statistics(stat_col, stat_agg)
# Set episode, so the datapoint will appear in the right place in TB.
stat_agg["episode"] = episode
# Export to logger, cvs and TB.
self.export_statistics(stat_agg, tag, export_to_log)
def cycle(self, iterable):
"""
Cycle an iterator to prevent its exhaustion.
This function is used in the (online) trainer to reuse the same ``DataLoader`` for a number of episodes \
greater than len(dataset)/batch_size.
:param iterable: iterable.
:type iterable: iter
"""
while True:
for x in iterable:
yield x
def set_random_seeds(self, params, section_name):
"""
Set ``torch`` & ``NumPy`` random seeds from the ``ParamRegistry``: \
If one was indicated, use it, or set a random one.
:param params: Section in config/param registry that will be changed \
("training" or "testing" only will be taken into account.)
:param section_name: Name of the section (for logging purposes only).
:type section_name: str
"""
# Set the random seeds: either from the loaded configuration or a default randomly selected one.
params.add_default_params({"seed_numpy": -1})
if params["seed_numpy"] == -1:
seed = randrange(0, 2 ** 32)
# Overwrite the config param!
params.add_config_params({"seed_numpy": seed})
self.logger.info("Setting numpy random seed in {} to: {}".format(section_name, params["seed_numpy"]))
np.random.seed(params["seed_numpy"])
params.add_default_params({"seed_torch": -1})
if params["seed_torch"] == -1:
seed = randrange(0, 2 ** 32)
# Overwrite the config param!
params.add_config_params({"seed_torch": seed})
self.logger.info("Setting torch random seed in {} to: {}".format(section_name, params["seed_torch"]))
torch.manual_seed(params["seed_torch"])
torch.cuda.manual_seed_all(params["seed_torch"])
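# Illustrative sketch (hypothetical, not part of the original file): the intended
# usage is to subclass Worker and implement run_experiment() on top of the helpers
# defined above. "DummyWorker" is a made-up name; a real run additionally needs a
# --config file that defines the 'training' problem section.
class DummyWorker(Worker):
    def __init__(self):
        super(DummyWorker, self).__init__(name="DummyWorker")

    def run_experiment(self):
        # Parse command line arguments and set the default sections/dataloader params.
        self.setup_experiment()
        # Build problem, sampler and DataLoader for the training section.
        problem, sampler, loader = self.build_problem_sampler_loader(
            self.params["training"], "training")
        # Number of iterations needed to cover the dataset once.
        epoch_size = self.get_epoch_size(
            problem, sampler,
            batch_size=self.params["training"]["problem"]["batch_size"],
            drop_last=self.params["training"]["dataloader"]["drop_last"])
        self.logger.info("Epoch size: {}".format(epoch_size))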
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.tensor",
"torch.utils.data.DataLoader"
] | 0.4.0 | tsjayram/mi-prometheus | cf163d9e246c3ae3c100045e58924148b2f81c39 |
1.7 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from einops import rearrange, repeat
class CrissCrossAttention(nn.Module):
def __init__(self, in_dim):
super(CrissCrossAttention, self).__init__()
self.query_conv = nn.Conv2d(in_channels=in_dim,
out_channels=in_dim // 8,
kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim,
out_channels=in_dim // 8,
kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim,
out_channels=in_dim,
kernel_size=1)
self.softmax = nn.Softmax(dim=3)
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, x):
device = x.device
b, _, h, w = x.shape
q = self.query_conv(x)
q_h = rearrange(q, "b c h w -> (b w) h c")
q_w = rearrange(q, "b c h w -> (b h) w c")
k = self.key_conv(x)
k_h = rearrange(k, "b c h w -> (b w) c h")
k_w = rearrange(k, "b c h w -> (b h) c w")
v = self.value_conv(x)
v_h = rearrange(v, "b c h w -> (b w) c h")
v_w = rearrange(v, "b c h w -> (b h) c w")
inf = repeat(torch.diag(
torch.tensor(float("-inf"), device=device).repeat(h), 0),
"h1 h2 -> (b w) h1 h2",
b=b,
w=w)
e_h = rearrange(torch.bmm(q_h, k_h) + inf,
"(b w) h1 h2 -> b h1 w h2",
b=b)
e_w = rearrange(torch.bmm(q_w, k_w), "(b h) w1 w2 -> b h w1 w2", b=b)
attn = self.softmax(torch.cat([e_h, e_w], 3))
attn_h, attn_w = attn.chunk(2, dim=-1)
attn_h = rearrange(attn_h, "b h1 w h2 -> (b w) h1 h2")
attn_w = rearrange(attn_w, "b h w1 w2 -> (b h) w1 w2")
out_h = torch.bmm(v_h, rearrange(attn_h, "bw h1 h2 -> bw h2 h1"))
out_h = rearrange(out_h, "(b w) c h -> b c h w", b=b)
out_w = torch.bmm(v_w, rearrange(attn_w, "bh w1 w2 -> bh w2 w1"))
out_w = rearrange(out_w, "(b h) c w -> b c h w", b=b)
return_attn = torch.stack([
rearrange(attn_h, "(b w) h1 h2 -> b h2 h1 w", b=b),
rearrange(attn_w, "(b h) w1 w2 -> b w2 h w1", b=b)
],
dim=1)
return self.gamma * (out_h + out_w) + x, return_attn
class RCCAModule(nn.Module):
def __init__(self, in_channels, kernel_size=3, return_attn=False):
super(RCCAModule, self).__init__()
self.return_attn = return_attn
inter_channels = in_channels // 4
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels,
inter_channels,
kernel_size=(kernel_size, kernel_size),
stride=(1, 1),
padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2),
bias=False), nn.BatchNorm2d(inter_channels), nn.ReLU())
self.cca = CrissCrossAttention(inter_channels)
self.conv2 = nn.Sequential(
nn.Conv2d(inter_channels,
in_channels,
kernel_size=(kernel_size, kernel_size),
stride=(1, 1),
padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2),
bias=False), nn.BatchNorm2d(in_channels), nn.ReLU())
def forward(self, x):
output = self.conv1(x)
attns = []
for _ in range(2):
output, attn = checkpoint(self.cca, output)
attns.append(attn)
output = self.conv2(output)
if self.return_attn:
return output, attns
else:
return output
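# Illustrative sketch (not part of the original file): running the recurrent
# criss-cross attention block on a dummy feature map. requires_grad=True is set
# because the module wraps the attention in torch.utils.checkpoint, which needs
# at least one differentiable input to build a backward graph.
if __name__ == "__main__":
    feats = torch.randn(1, 64, 24, 24, requires_grad=True)
    rcca = RCCAModule(in_channels=64, return_attn=True)
    out, attns = rcca(feats)
    print(out.shape)   # torch.Size([1, 64, 24, 24]) - spatial size is preserved
    print(len(attns))  # 2: one attention map per recurrence of the CCA block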
| [
"torch.zeros",
"torch.cat",
"torch.nn.Softmax",
"torch.nn.BatchNorm2d",
"torch.bmm",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.utils.checkpoint.checkpoint"
] | 1.7.1 | antonkulaga/DeepAb | 51a32d06d19815705bdbfb35a8a9518c17ec313a |
1.4 | import torch
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
FloatTensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor
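# Illustrative sketch (not part of the original file): these aliases let the rest
# of the code construct tensors without checking torch.cuda.is_available() at
# every call site.
if __name__ == "__main__":
    coords = FloatTensor([[0.0, 1.0], [2.0, 3.0]])  # lands on the GPU if one is available
    labels = LongTensor([0, 1])
    print(device, coords.device, labels.device)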
| [
"torch.device",
"torch.cuda.is_available"
] | 1.4.0 | dgoodwin208/6.883ProteinDocking | 07f33688bd5ec8c5ae6d4d4113eb64b0f2352e9e |
1.6 | import json
from pathlib import Path
import torch
import numpy as np
from PIL import Image
from torch.utils.data import Dataset, TensorDataset
from tfrecord.torch.dataset import MultiTFRecordDataset
from uncertainty_eval.datasets.tabular import TabularDataset
from uncertainty_eval.datasets.abstract_datasplit import DatasetSplit
class GaussianNoise(DatasetSplit):
def __init__(self, data_root, mean, std, length=10_000):
self.data_root = data_root
self.mean = mean
self.std = std
self.length = length
def train(self, transform):
return self.test(transform)
def val(self, transform):
return self.test(transform)
def test(self, transform):
return GaussianNoiseDataset(self.length, self.mean, self.std, transform)
class GaussianNoiseDataset(Dataset):
"""
Use CIFAR-10 mean and standard deviation as default values.
mean=(125.3, 123.0, 113.9), std=(63.0, 62.1, 66.7)
"""
def __init__(self, length, mean, std, transform=None):
self.transform = transform
self.mean = mean
self.std = std
self.length = length
self.dist = torch.distributions.Normal(mean, std)
def __len__(self):
return self.length
def __getitem__(self, idx):
img = self.dist.sample()
if len(self.mean.shape) == 3:
img = Image.fromarray(img.numpy().squeeze().astype(np.uint8))
if self.transform is not None:
img = self.transform(img)
return img, -1
class Constant(DatasetSplit):
def __init__(self, data_root, low, high, shape, length=10_000):
self.low = low
self.high = high
self.length = length
self.shape = shape
def train(self, transform):
return self.test(transform)
def val(self, transform):
return self.test(transform)
def test(self, transform):
return ConstantDataset(self.length, self.low, self.high, self.shape, transform)
class ConstantDataset(Dataset):
def __init__(self, length, low, high, shape, transform=None):
assert isinstance(low, float) and isinstance(high, float)
self.low = low
self.high = high
self.transform = transform
self.length = length
self.shape = shape
self.dist = torch.distributions.Uniform(low, high)
def __len__(self):
return self.length
def __getitem__(self, idx):
sample = self.dist.sample().item()
sample = torch.empty(self.shape).fill_(sample)
if len(self.shape) == 3:
sample = Image.fromarray(sample.numpy().squeeze().astype(np.uint8))
if self.transform is not None:
sample = self.transform(sample)
return sample, -1
class UniformNoise(DatasetSplit):
def __init__(self, data_root, low, high, length=10_000):
self.low = low
self.high = high
self.length = length
def train(self, transform):
return self.test(transform)
def val(self, transform):
return self.test(transform)
def test(self, transform):
return UniformNoiseDataset(self.length, self.low, self.high, transform)
class UniformNoiseDataset(Dataset):
def __init__(self, length, low, high, transform=None):
self.low = low
self.high = high
self.transform = transform
self.length = length
self.dist = torch.distributions.Uniform(low, high)
def __len__(self):
return self.length
def __getitem__(self, idx):
img = self.dist.sample()
if len(self.low.shape) == 3:
img = Image.fromarray(img.numpy().squeeze().astype(np.uint8))
if self.transform is not None:
img = self.transform(img)
return img, -1
class OODGenomics(torch.utils.data.IterableDataset):
"""PyTorch Dataset implementation for the Bacteria Genomics OOD dataset (https://github.com/google-research/google-research/tree/master/genomics_ood) proposed in
J. Ren et al., “Likelihood Ratios for Out-of-Distribution Detection,” arXiv:1906.02845 [cs, stat], Available: http://arxiv.org/abs/1906.02845.
"""
splits = {
"train": "before_2011_in_tr",
"val": "between_2011-2016_in_val",
"test": "after_2016_in_test",
"val_ood": "between_2011-2016_ood_val",
"test_ood": "after_2016_ood_test",
}
def __init__(self, data_root, split="train", transform=None, target_transform=None):
if isinstance(data_root, str):
data_root = Path(data_root)
self.data_root = data_root / "llr_ood_genomics"
assert split in self.splits, f"Split '{split}' does not exist."
split_dir = self.data_root / self.splits[split]
tf_record_ids = [f.stem for f in split_dir.iterdir() if f.suffix == ".tfrecord"]
self.ds = MultiTFRecordDataset(
data_pattern=str(split_dir / "{}.tfrecord"),
index_pattern=str(split_dir / "{}.index"),
splits={id_: 1 / len(tf_record_ids) for id_ in tf_record_ids},
description={"x": "byte", "y": "int", "z": "byte"},
)
with open(self.data_root / "label_dict.json") as f:
label_dict = json.load(f)
self.label_dict = {v: k for k, v in label_dict.items()}
transform = transform if transform is not None else lambda x: x
target_transform = (
target_transform if target_transform is not None else lambda x: x
)
self.data_transform = lambda x: self.full_transform(
x, transform, target_transform
)
@staticmethod
def full_transform(item, transform, target_transform):
dec = np.array([int(i) for i in item["x"].tobytes().decode("utf-8").split(" ")])
x = torch.from_numpy(transform(dec.copy()))
x = torch.nn.functional.one_hot(x.long(), 4).float()
y = torch.from_numpy(target_transform(item["y"].copy())).long().squeeze()
return x, y
def __iter__(self):
return map(self.data_transform, self.ds.__iter__())
class GenomicsDataset(DatasetSplit):
data_shape = (250,)
def __init__(self, data_root):
self.data_root = data_root
def train(self, transform):
return OODGenomics(self.data_root, split="train", transform=transform)
def val(self, transform):
return OODGenomics(self.data_root, split="val", transform=transform)
def test(self, transform):
return OODGenomics(self.data_root, split="test", transform=transform)
class OODGenomicsDataset(DatasetSplit):
data_shape = (250,)
def __init__(self, data_root):
self.data_root = data_root
def train(self, transform):
raise NotImplementedError
def val(self, transform):
return OODGenomics(self.data_root, split="val_ood", transform=transform)
def test(self, transform):
return OODGenomics(self.data_root, split="test_ood", transform=transform)
class ImageEmbeddingDataset(DatasetSplit):
data_shape = (640,)
def __init__(self, data_root, dataset_name):
self.data_root = data_root
self.dataset_name = dataset_name
def load_split(self, split):
data = np.load(
self.data_root / "embeddings" / f"{self.dataset_name}_{split}.npz"
)
return torch.from_numpy(data["x"]), torch.from_numpy(data["y"])
def train(self, transform):
return TabularDataset(*self.load_split("train"), transforms=transform)
def val(self, transform):
return TabularDataset(*self.load_split("val"), transforms=transform)
def test(self, transform):
return TabularDataset(*self.load_split("test"), transforms=transform)
class GenomicsNoise(DatasetSplit):
data_shape = (250,)
def __init__(self, data_root):
self.data_root = data_root
data = np.load(self.data_root / "genomics_noise.npz")
self.x = torch.from_numpy(data["x"])
self.y = torch.from_numpy(data["y"])
def train(self, transform):
raise NotImplementedError
def val(self, transform):
raise NotImplementedError
def test(self, transform):
return TensorDataset(self.x, self.y)
class GenomicsEmbeddingsDataset(ImageEmbeddingDataset):
data_shape = (128,)
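# Illustrative sketch (not part of the original file): drawing an OOD batch from
# the Gaussian noise split. With a 1-D mean/std the samples stay plain tensors;
# a (C, H, W)-shaped mean/std would instead be converted to PIL images above.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    split = GaussianNoise(data_root=None, mean=torch.zeros(8), std=torch.ones(8), length=128)
    loader = DataLoader(split.test(transform=None), batch_size=16)
    xs, ys = next(iter(loader))
    print(xs.shape, ys[:4])  # torch.Size([16, 8]); all labels are the dummy value -1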
| [
"torch.distributions.Uniform",
"torch.distributions.Normal",
"torch.from_numpy",
"torch.empty",
"torch.utils.data.TensorDataset"
] | 1.6.0 | selflein/nn_uncertainty_eval | 94a7f2292b8db2197cd55fab57324d438618ae06 |
1.0 | import os
import re
import logging
from abc import abstractmethod
from collections import Counter
from pathlib import Path
from typing import List, Union, Dict
import gensim
import numpy as np
import torch
from bpemb import BPEmb
from deprecated import deprecated
from pytorch_pretrained_bert import (
BertTokenizer,
BertModel,
TransfoXLTokenizer,
TransfoXLModel,
OpenAIGPTModel,
OpenAIGPTTokenizer,
)
from pytorch_pretrained_bert.modeling_openai import (
PRETRAINED_MODEL_ARCHIVE_MAP as OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
from pytorch_pretrained_bert.modeling_transfo_xl import (
PRETRAINED_MODEL_ARCHIVE_MAP as TRANSFORMER_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
)
import flair
from flair.data import Corpus
from .nn import LockedDropout, WordDropout
from .data import Dictionary, Token, Sentence
from .file_utils import cached_path, open_inside_zip
log = logging.getLogger("flair")
class Embeddings(torch.nn.Module):
"""Abstract base class for all embeddings. Every new type of embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
@abstractmethod
def embedding_type(self) -> str:
pass
def embed(self, sentences: Union[Sentence, List[Sentence]]) -> List[Sentence]:
"""Add embeddings to all words in a list of sentences. If embeddings are already added, updates only if embeddings
are non-static."""
# if only one sentence is passed, convert to list of sentence
if type(sentences) is Sentence:
sentences = [sentences]
everything_embedded: bool = True
if self.embedding_type == "word-level":
for sentence in sentences:
for token in sentence.tokens:
if self.name not in token._embeddings.keys():
everything_embedded = False
else:
for sentence in sentences:
if self.name not in sentence._embeddings.keys():
everything_embedded = False
if not everything_embedded or not self.static_embeddings:
self._add_embeddings_internal(sentences)
return sentences
@abstractmethod
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
"""Private method for adding embeddings to all words in a list of sentences."""
pass
class TokenEmbeddings(Embeddings):
"""Abstract base class for all token-level embeddings. Ever new type of word embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
def embedding_type(self) -> str:
return "word-level"
class DocumentEmbeddings(Embeddings):
"""Abstract base class for all document-level embeddings. Ever new type of document embedding must implement these methods."""
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
pass
@property
def embedding_type(self) -> str:
return "sentence-level"
class StackedEmbeddings(TokenEmbeddings):
"""A stack of embeddings, used if you need to combine several different embedding types."""
def __init__(self, embeddings: List[TokenEmbeddings], detach: bool = True):
"""The constructor takes a list of embeddings to be combined."""
super().__init__()
self.embeddings = embeddings
# IMPORTANT: add embeddings as torch modules
for i, embedding in enumerate(embeddings):
self.add_module("list_embedding_{}".format(i), embedding)
self.detach: bool = detach
self.name: str = "Stack"
self.static_embeddings: bool = True
self.__embedding_type: str = embeddings[0].embedding_type
self.__embedding_length: int = 0
for embedding in embeddings:
self.__embedding_length += embedding.embedding_length
def embed(
self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True
):
# if only one sentence is passed, convert to a list of sentences
if type(sentences) is Sentence:
sentences = [sentences]
for embedding in self.embeddings:
embedding.embed(sentences)
@property
def embedding_type(self) -> str:
return self.__embedding_type
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for embedding in self.embeddings:
embedding._add_embeddings_internal(sentences)
return sentences
def __str__(self):
return f'StackedEmbeddings [{",".join([str(e) for e in self.embeddings])}]'
class WordEmbeddings(TokenEmbeddings):
"""Standard static word embeddings, such as GloVe or FastText."""
def __init__(self, embeddings: str, field: str = None):
"""
Initializes classic word embeddings. Constructor downloads required files if not there.
:param embeddings: one of: 'glove', 'extvec', 'crawl' or two-letter language code or custom
If you want to use a custom embedding file, just pass the path to the embeddings as embeddings variable.
"""
self.embeddings = embeddings
old_base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/"
)
base_path = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/"
)
embeddings_path_v4 = (
"https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/"
)
embeddings_path_v4_1 = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4.1/"
cache_dir = Path("embeddings")
# GLOVE embeddings
if embeddings.lower() == "glove" or embeddings.lower() == "en-glove":
cached_path(f"{old_base_path}glove.gensim.vectors.npy", cache_dir=cache_dir)
embeddings = cached_path(
f"{old_base_path}glove.gensim", cache_dir=cache_dir
)
# TURIAN embeddings
elif embeddings.lower() == "turian" or embeddings.lower() == "en-turian":
cached_path(
f"{embeddings_path_v4_1}turian.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{embeddings_path_v4_1}turian", cache_dir=cache_dir
)
# KOMNINOS embeddings
elif embeddings.lower() == "extvec" or embeddings.lower() == "en-extvec":
cached_path(
f"{old_base_path}extvec.gensim.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{old_base_path}extvec.gensim", cache_dir=cache_dir
)
# FT-CRAWL embeddings
elif embeddings.lower() == "crawl" or embeddings.lower() == "en-crawl":
cached_path(
f"{base_path}en-fasttext-crawl-300d-1M.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}en-fasttext-crawl-300d-1M", cache_dir=cache_dir
)
# FT-CRAWL embeddings
elif (
embeddings.lower() == "news"
or embeddings.lower() == "en-news"
or embeddings.lower() == "en"
):
cached_path(
f"{base_path}en-fasttext-news-300d-1M.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}en-fasttext-news-300d-1M", cache_dir=cache_dir
)
# twitter embeddings
elif embeddings.lower() == "twitter" or embeddings.lower() == "en-twitter":
cached_path(
f"{old_base_path}twitter.gensim.vectors.npy", cache_dir=cache_dir
)
embeddings = cached_path(
f"{old_base_path}twitter.gensim", cache_dir=cache_dir
)
# two-letter language code wiki embeddings
elif len(embeddings.lower()) == 2:
cached_path(
f"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M.vectors.npy",
cache_dir=cache_dir,
)
embeddings = cached_path(
f"{embeddings_path_v4}{embeddings}-wiki-fasttext-300d-1M",
cache_dir=cache_dir,
)
# two-letter language code wiki embeddings
elif len(embeddings.lower()) == 7 and embeddings.endswith("-wiki"):
cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M.vectors.npy",
cache_dir=cache_dir,
)
embeddings = cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-wiki-fasttext-300d-1M",
cache_dir=cache_dir,
)
# two-letter language code crawl embeddings
elif len(embeddings.lower()) == 8 and embeddings.endswith("-crawl"):
cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M.vectors.npy",
cache_dir=cache_dir,
)
embeddings = cached_path(
f"{embeddings_path_v4}{embeddings[:2]}-crawl-fasttext-300d-1M",
cache_dir=cache_dir,
)
elif not Path(embeddings).exists():
raise ValueError(
f'The given embeddings "{embeddings}" is not available or is not a valid path.'
)
self.name: str = str(embeddings)
self.static_embeddings = True
if str(embeddings).endswith(".bin"):
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
str(embeddings), binary=True
)
else:
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load(
str(embeddings)
)
self.field = field
self.__embedding_length: int = self.precomputed_word_embeddings.vector_size
super().__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
if word in self.precomputed_word_embeddings:
word_embedding = self.precomputed_word_embeddings[word]
elif word.lower() in self.precomputed_word_embeddings:
word_embedding = self.precomputed_word_embeddings[word.lower()]
elif (
re.sub(r"\d", "#", word.lower()) in self.precomputed_word_embeddings
):
word_embedding = self.precomputed_word_embeddings[
re.sub(r"\d", "#", word.lower())
]
elif (
re.sub(r"\d", "0", word.lower()) in self.precomputed_word_embeddings
):
word_embedding = self.precomputed_word_embeddings[
re.sub(r"\d", "0", word.lower())
]
else:
word_embedding = np.zeros(self.embedding_length, dtype="float")
word_embedding = torch.FloatTensor(word_embedding)
token.set_embedding(self.name, word_embedding)
return sentences
def __str__(self):
return self.name
def extra_repr(self):
return f"'{self.embeddings}'"
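# Illustrative sketch (not part of the original file): embedding a sentence with
# classic GloVe vectors. The first call downloads the embedding files, so this is
# only meant as a usage illustration and is kept behind a __main__ guard.
if __name__ == "__main__":
    glove = WordEmbeddings("glove")
    sentence = Sentence("The grass is green .")
    glove.embed(sentence)
    for token in sentence:
        print(token.text, token.embedding.shape)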
class OneHotEmbeddings(TokenEmbeddings):
"""One-hot encoded embeddings."""
def __init__(
self,
corpus=Union[Corpus, List[Sentence]],
field: str = "text",
embedding_length: int = 300,
min_freq: int = 3,
):
super().__init__()
self.name = "one-hot"
self.static_embeddings = False
self.min_freq = min_freq
tokens = list(map((lambda s: s.tokens), corpus.train))
tokens = [token for sublist in tokens for token in sublist]
if field == "text":
most_common = Counter(list(map((lambda t: t.text), tokens))).most_common()
else:
most_common = Counter(
list(map((lambda t: t.get_tag(field)), tokens))
).most_common()
tokens = []
for token, freq in most_common:
if freq < min_freq:
break
tokens.append(token)
self.vocab_dictionary: Dictionary = Dictionary()
for token in tokens:
self.vocab_dictionary.add_item(token)
# max_tokens = 500
self.__embedding_length = embedding_length
print(self.vocab_dictionary.idx2item)
print(f"vocabulary size of {len(self.vocab_dictionary)}")
# model architecture
self.embedding_layer = torch.nn.Embedding(
len(self.vocab_dictionary), self.__embedding_length
)
torch.nn.init.xavier_uniform_(self.embedding_layer.weight)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
one_hot_sentences = []
for i, sentence in enumerate(sentences):
context_idxs = [
self.vocab_dictionary.get_idx_for_item(t.text) for t in sentence.tokens
]
one_hot_sentences.extend(context_idxs)
one_hot_sentences = torch.tensor(one_hot_sentences, dtype=torch.long).to(
flair.device
)
embedded = self.embedding_layer.forward(one_hot_sentences)
index = 0
for sentence in sentences:
for token in sentence:
embedding = embedded[index]
token.set_embedding(self.name, embedding)
index += 1
return sentences
def __str__(self):
return self.name
@property
def embedding_length(self) -> int:
return self.__embedding_length
def extra_repr(self):
return "min_freq={}".format(self.min_freq)
class BPEmbSerializable(BPEmb):
def __getstate__(self):
state = self.__dict__.copy()
# save the sentence piece model as binary file (not as path which may change)
state["spm_model_binary"] = open(self.model_file, mode="rb").read()
state["spm"] = None
return state
def __setstate__(self, state):
from bpemb.util import sentencepiece_load
model_file = self.model_tpl.format(lang=state["lang"], vs=state["vs"])
self.__dict__ = state
# write out the binary sentence piece model into the expected directory
self.cache_dir: Path = Path(flair.cache_root) / "embeddings"
if "spm_model_binary" in self.__dict__:
# if the model was saved as binary and it is not found on disk, write to appropriate path
if not os.path.exists(self.cache_dir / state["lang"]):
os.makedirs(self.cache_dir / state["lang"])
self.model_file = self.cache_dir / model_file
with open(self.model_file, "wb") as out:
out.write(self.__dict__["spm_model_binary"])
else:
# otherwise, use normal process and potentially trigger another download
self.model_file = self._load_file(model_file)
# once the model is there, load it with sentence piece
state["spm"] = sentencepiece_load(self.model_file)
class BytePairEmbeddings(TokenEmbeddings):
def __init__(
self,
language: str,
dim: int = 50,
syllables: int = 100000,
cache_dir=Path(flair.cache_root) / "embeddings",
):
"""
        Initializes byte-pair embeddings (BPEmb). The constructor downloads the required files if they are not present.
"""
self.name: str = f"bpe-{language}-{syllables}-{dim}"
self.static_embeddings = True
self.embedder = BPEmbSerializable(
lang=language, vs=syllables, dim=dim, cache_dir=cache_dir
)
self.__embedding_length: int = self.embedder.emb.vector_size * 2
super().__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
if "field" not in self.__dict__ or self.field is None:
word = token.text
else:
word = token.get_tag(self.field).value
if word.strip() == "":
# empty words get no embedding
token.set_embedding(
self.name, torch.zeros(self.embedding_length, dtype=torch.float)
)
else:
# all other words get embedded
embeddings = self.embedder.embed(word.lower())
embedding = np.concatenate(
(embeddings[0], embeddings[len(embeddings) - 1])
)
token.set_embedding(
self.name, torch.tensor(embedding, dtype=torch.float)
)
return sentences
def __str__(self):
return self.name
def extra_repr(self):
return "model={}".format(self.name)
class ELMoEmbeddings(TokenEmbeddings):
"""Contextual word embeddings using word-level LM, as proposed in Peters et al., 2018."""
def __init__(
self, model: str = "original", options_file: str = None, weight_file: str = None
):
super().__init__()
try:
import allennlp.commands.elmo
except:
log.warning("-" * 100)
log.warning('ATTENTION! The library "allennlp" is not installed!')
log.warning(
'To use ELMoEmbeddings, please first install with "pip install allennlp"'
)
log.warning("-" * 100)
pass
self.name = "elmo-" + model
self.static_embeddings = True
if not options_file or not weight_file:
# the default model for ELMo is the 'original' model, which is very large
options_file = allennlp.commands.elmo.DEFAULT_OPTIONS_FILE
weight_file = allennlp.commands.elmo.DEFAULT_WEIGHT_FILE
# alternatively, a small, medium or portuguese model can be selected by passing the appropriate mode name
if model == "small":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x1024_128_2048cnn_1xhighway/elmo_2x1024_128_2048cnn_1xhighway_weights.hdf5"
if model == "medium":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x2048_256_2048cnn_1xhighway/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5"
if model == "pt" or model == "portuguese":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pt/elmo_pt_weights.hdf5"
if model == "pubmed":
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/contributed/pubmed/elmo_2x4096_512_2048cnn_2xhighway_weights_PubMed_only.hdf5"
# put on Cuda if available
from flair import device
if re.fullmatch(r'cuda:[0-9]+', str(device)):
cuda_device = int(str(device).split(':')[-1])
elif str(device) == "cpu":
cuda_device = -1
else:
cuda_device = 0
self.ee = allennlp.commands.elmo.ElmoEmbedder(
options_file=options_file, weight_file=weight_file, cuda_device=cuda_device
)
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
sentence_words: List[List[str]] = []
for sentence in sentences:
sentence_words.append([token.text for token in sentence])
embeddings = self.ee.embed_batch(sentence_words)
for i, sentence in enumerate(sentences):
sentence_embeddings = embeddings[i]
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
word_embedding = torch.cat(
[
torch.FloatTensor(sentence_embeddings[0, token_idx, :]),
torch.FloatTensor(sentence_embeddings[1, token_idx, :]),
torch.FloatTensor(sentence_embeddings[2, token_idx, :]),
],
0,
)
token.set_embedding(self.name, word_embedding)
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
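# Usage sketch: ELMoEmbeddings concatenates the three ELMo layers per token; the resulting
# embedding_length is inferred from a dummy sentence at construction time. Requires allennlp.
#
#   from flair.data import Sentence
#   embedding = ELMoEmbeddings(model="small")
#   sentence = Sentence("hello world")
#   embedding.embed(sentence)
#   print(embedding.embedding_length)   # 3 * layer size of the selected model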
class ELMoTransformerEmbeddings(TokenEmbeddings):
"""Contextual word embeddings using word-level Transformer-based LM, as proposed in Peters et al., 2018."""
def __init__(self, model_file: str):
super().__init__()
try:
from allennlp.modules.token_embedders.bidirectional_language_model_token_embedder import (
BidirectionalLanguageModelTokenEmbedder,
)
from allennlp.data.token_indexers.elmo_indexer import (
ELMoTokenCharactersIndexer,
)
except:
log.warning("-" * 100)
log.warning('ATTENTION! The library "allennlp" is not installed!')
log.warning(
"To use ELMoTransformerEmbeddings, please first install a recent version from https://github.com/allenai/allennlp"
)
log.warning("-" * 100)
pass
self.name = "elmo-transformer"
self.static_embeddings = True
self.lm_embedder = BidirectionalLanguageModelTokenEmbedder(
archive_file=model_file,
dropout=0.2,
bos_eos_tokens=("<S>", "</S>"),
remove_bos_eos=True,
requires_grad=False,
)
self.lm_embedder = self.lm_embedder.to(device=flair.device)
self.vocab = self.lm_embedder._lm.vocab
self.indexer = ELMoTokenCharactersIndexer()
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# Avoid conflicts with flair's Token class
import allennlp.data.tokenizers.token as allen_nlp_token
indexer = self.indexer
vocab = self.vocab
for sentence in sentences:
character_indices = indexer.tokens_to_indices(
[allen_nlp_token.Token(token.text) for token in sentence], vocab, "elmo"
)["elmo"]
indices_tensor = torch.LongTensor([character_indices])
indices_tensor = indices_tensor.to(device=flair.device)
embeddings = self.lm_embedder(indices_tensor)[0].detach().cpu().numpy()
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
embedding = embeddings[token_idx]
word_embedding = torch.FloatTensor(embedding)
token.set_embedding(self.name, word_embedding)
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
class TransformerXLEmbeddings(TokenEmbeddings):
def __init__(self, model: str = "transfo-xl-wt103"):
"""Transformer-XL embeddings, as proposed in Dai et al., 2019.
:param model: name of Transformer-XL model
"""
super().__init__()
if model not in TRANSFORMER_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys():
raise ValueError("Provided Transformer-XL model is not available.")
self.tokenizer = TransfoXLTokenizer.from_pretrained(model)
self.model = TransfoXLModel.from_pretrained(model)
self.name = model
self.static_embeddings = True
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.model.to(flair.device)
self.model.eval()
with torch.no_grad():
for sentence in sentences:
token_strings = [token.text for token in sentence.tokens]
indexed_tokens = self.tokenizer.convert_tokens_to_ids(token_strings)
tokens_tensor = torch.tensor([indexed_tokens])
tokens_tensor = tokens_tensor.to(flair.device)
hidden_states, _ = self.model(tokens_tensor)
for token, token_idx in zip(
sentence.tokens, range(len(sentence.tokens))
):
token.set_embedding(self.name, hidden_states[0][token_idx])
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
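# Usage sketch: TransformerXLEmbeddings feeds the tokenized sentence through Transformer-XL
# and uses each position's last hidden state as the (static) token embedding.
#
#   from flair.data import Sentence
#   embedding = TransformerXLEmbeddings("transfo-xl-wt103")
#   sentence = Sentence("hello world")
#   embedding.embed(sentence)
#   vec = sentence.tokens[0].get_embedding()   # d_model-dimensional vector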
class OpenAIGPTEmbeddings(TokenEmbeddings):
def __init__(
self, model: str = "openai-gpt", pooling_operation: str = "first_last"
):
"""OpenAI GPT embeddings, as proposed in Radford et al. 2018.
:param model: name of OpenAI GPT model
:param pooling_operation: defines pooling operation for subwords
"""
super().__init__()
if model not in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP.keys():
raise ValueError("Provided OpenAI GPT model is not available.")
self.tokenizer = OpenAIGPTTokenizer.from_pretrained(model)
self.model = OpenAIGPTModel.from_pretrained(model)
self.name = model
self.static_embeddings = True
self.pooling_operation = pooling_operation
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.model.to(flair.device)
self.model.eval()
with torch.no_grad():
for sentence in sentences:
for token in sentence.tokens:
token_text = token.text
subwords = self.tokenizer.tokenize(token_text)
indexed_tokens = self.tokenizer.convert_tokens_to_ids(subwords)
tokens_tensor = torch.tensor([indexed_tokens])
tokens_tensor = tokens_tensor.to(flair.device)
hidden_states = self.model(tokens_tensor)
if self.pooling_operation == "first":
# Use embedding of first subword
token.set_embedding(self.name, hidden_states[0][0])
elif self.pooling_operation == "last":
last_embedding = hidden_states[0][len(hidden_states[0]) - 1]
token.set_embedding(self.name, last_embedding)
elif self.pooling_operation == "first_last":
# Use embedding of first and last subword
first_embedding = hidden_states[0][0]
last_embedding = hidden_states[0][len(hidden_states[0]) - 1]
final_embedding = torch.cat([first_embedding, last_embedding])
token.set_embedding(self.name, final_embedding)
else:
# Otherwise, use mean over all subwords in token
all_embeddings = [
embedding.unsqueeze(0) for embedding in hidden_states[0]
]
mean = torch.mean(torch.cat(all_embeddings, dim=0), dim=0)
token.set_embedding(self.name, mean)
return sentences
def extra_repr(self):
return "model={}".format(self.name)
def __str__(self):
return self.name
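# Usage sketch: pooling_operation controls how sub-word states are reduced to one vector per
# token: "first", "last", the concatenation "first_last" (doubling the hidden size), or the
# mean over all sub-words for any other value (e.g. "mean").
#
#   from flair.data import Sentence
#   embedding = OpenAIGPTEmbeddings(model="openai-gpt", pooling_operation="mean")
#   embedding.embed(Sentence("hello world"))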
class CharacterEmbeddings(TokenEmbeddings):
"""Character embeddings of words, as proposed in Lample et al., 2016."""
def __init__(self, path_to_char_dict: str = None, char_embedding_dim: int = 25, hidden_size_char: int = 25):
"""Uses the default character dictionary if none provided."""
super().__init__()
self.name = "Char"
self.static_embeddings = False
# use list of common characters if none provided
if path_to_char_dict is None:
self.char_dictionary: Dictionary = Dictionary.load("common-chars")
else:
self.char_dictionary: Dictionary = Dictionary.load_from_file(
path_to_char_dict
)
self.char_embedding_dim: int = char_embedding_dim
self.hidden_size_char: int = hidden_size_char
self.char_embedding = torch.nn.Embedding(
len(self.char_dictionary.item2idx), self.char_embedding_dim
)
self.char_rnn = torch.nn.LSTM(
self.char_embedding_dim,
self.hidden_size_char,
num_layers=1,
bidirectional=True,
)
        self.__embedding_length = self.hidden_size_char * 2  # BiLSTM output is 2 * hidden size
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]):
for sentence in sentences:
tokens_char_indices = []
# translate words in sentence into ints using dictionary
for token in sentence.tokens:
char_indices = [
self.char_dictionary.get_idx_for_item(char) for char in token.text
]
tokens_char_indices.append(char_indices)
# sort words by length, for batching and masking
tokens_sorted_by_length = sorted(
tokens_char_indices, key=lambda p: len(p), reverse=True
)
d = {}
for i, ci in enumerate(tokens_char_indices):
for j, cj in enumerate(tokens_sorted_by_length):
if ci == cj:
d[j] = i
continue
chars2_length = [len(c) for c in tokens_sorted_by_length]
longest_token_in_sentence = max(chars2_length)
tokens_mask = torch.zeros(
(len(tokens_sorted_by_length), longest_token_in_sentence),
dtype=torch.long,
device=flair.device,
)
for i, c in enumerate(tokens_sorted_by_length):
tokens_mask[i, : chars2_length[i]] = torch.tensor(
c, dtype=torch.long, device=flair.device
)
# chars for rnn processing
chars = tokens_mask
character_embeddings = self.char_embedding(chars).transpose(0, 1)
packed = torch.nn.utils.rnn.pack_padded_sequence(
character_embeddings, chars2_length
)
lstm_out, self.hidden = self.char_rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)
outputs = outputs.transpose(0, 1)
chars_embeds_temp = torch.zeros(
(outputs.size(0), outputs.size(2)),
dtype=torch.float,
device=flair.device,
)
for i, index in enumerate(output_lengths):
chars_embeds_temp[i] = outputs[i, index - 1]
character_embeddings = chars_embeds_temp.clone()
for i in range(character_embeddings.size(0)):
character_embeddings[d[i]] = chars_embeds_temp[i]
for token_number, token in enumerate(sentence.tokens):
token.set_embedding(self.name, character_embeddings[token_number])
def __str__(self):
return self.name
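# Usage sketch: CharacterEmbeddings runs a bidirectional character LSTM over each token and
# uses the concatenated final states (2 * hidden_size_char dimensions). These embeddings are
# trained with the downstream task, so they are typically stacked with pre-trained embeddings.
#
#   from flair.data import Sentence
#   embedding = CharacterEmbeddings(char_embedding_dim=25, hidden_size_char=25)
#   embedding.embed(Sentence("hello world"))   # each token gets a 50-dim vector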
class FlairEmbeddings(TokenEmbeddings):
"""Contextual string embeddings of words, as proposed in Akbik et al., 2018."""
def __init__(
self,
model: str,
use_cache: bool = False,
cache_directory: Path = None,
chars_per_chunk: int = 512,
):
"""
initializes contextual string embeddings using a character-level language model.
:param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',
'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'
depending on which character language model is desired.
:param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will
not allow re-use of once computed embeddings that do not fit into memory
:param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. otherwise the cache
is written to the provided directory.
:param chars_per_chunk: max number of chars per rnn pass to control speed/memory tradeoff. Higher means faster but requires
more memory. Lower means slower but less memory.
"""
super().__init__()
cache_dir = Path("embeddings")
aws_path: str = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources"
self.PRETRAINED_MODEL_ARCHIVE_MAP = {
# multilingual models
"multi-forward": f"{aws_path}/embeddings-v0.4/lm-multi-forward-v0.1.pt",
"multi-backward": f"{aws_path}/embeddings-v0.4/lm-multi-backward-v0.1.pt",
"multi-forward-fast": f"{aws_path}/embeddings-v0.4/lm-multi-forward-fast-v0.1.pt",
"multi-backward-fast": f"{aws_path}/embeddings-v0.4/lm-multi-backward-fast-v0.1.pt",
# English models
"news-forward": f"{aws_path}/embeddings-v0.4.1/big-news-forward--h2048-l1-d0.05-lr30-0.25-20/news-forward-0.4.1.pt",
"news-backward": f"{aws_path}/embeddings-v0.4.1/big-news-backward--h2048-l1-d0.05-lr30-0.25-20/news-backward-0.4.1.pt",
"news-forward-fast": f"{aws_path}/embeddings/lm-news-english-forward-1024-v0.2rc.pt",
"news-backward-fast": f"{aws_path}/embeddings/lm-news-english-backward-1024-v0.2rc.pt",
"mix-forward": f"{aws_path}/embeddings/lm-mix-english-forward-v0.2rc.pt",
"mix-backward": f"{aws_path}/embeddings/lm-mix-english-backward-v0.2rc.pt",
# Arabic
"ar-forward": f"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-forward-v0.1.pt",
"ar-backward": f"{aws_path}/embeddings-stefan-it/lm-ar-opus-large-backward-v0.1.pt",
# Bulgarian
"bg-forward-fast": f"{aws_path}/embeddings-v0.3/lm-bg-small-forward-v0.1.pt",
"bg-backward-fast": f"{aws_path}/embeddings-v0.3/lm-bg-small-backward-v0.1.pt",
"bg-forward": f"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-forward-v0.1.pt",
"bg-backward": f"{aws_path}/embeddings-stefan-it/lm-bg-opus-large-backward-v0.1.pt",
# Czech
"cs-forward": f"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-forward-v0.1.pt",
"cs-backward": f"{aws_path}/embeddings-stefan-it/lm-cs-opus-large-backward-v0.1.pt",
"cs-v0-forward": f"{aws_path}/embeddings-v0.4/lm-cs-large-forward-v0.1.pt",
"cs-v0-backward": f"{aws_path}/embeddings-v0.4/lm-cs-large-backward-v0.1.pt",
# Danish
"da-forward": f"{aws_path}/embeddings-stefan-it/lm-da-opus-large-forward-v0.1.pt",
"da-backward": f"{aws_path}/embeddings-stefan-it/lm-da-opus-large-backward-v0.1.pt",
# German
"de-forward": f"{aws_path}/embeddings/lm-mix-german-forward-v0.2rc.pt",
"de-backward": f"{aws_path}/embeddings/lm-mix-german-backward-v0.2rc.pt",
"de-historic-ha-forward": f"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-forward-v0.1.pt",
"de-historic-ha-backward": f"{aws_path}/embeddings-stefan-it/lm-historic-hamburger-anzeiger-backward-v0.1.pt",
"de-historic-wz-forward": f"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-forward-v0.1.pt",
"de-historic-wz-backward": f"{aws_path}/embeddings-stefan-it/lm-historic-wiener-zeitung-backward-v0.1.pt",
# Spanish
"es-forward": f"{aws_path}/embeddings-v0.4/language_model_es_forward_long/lm-es-forward.pt",
"es-backward": f"{aws_path}/embeddings-v0.4/language_model_es_backward_long/lm-es-backward.pt",
"es-forward-fast": f"{aws_path}/embeddings-v0.4/language_model_es_forward/lm-es-forward-fast.pt",
"es-backward-fast": f"{aws_path}/embeddings-v0.4/language_model_es_backward/lm-es-backward-fast.pt",
# Basque
"eu-forward": f"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-forward-v0.1.pt",
"eu-backward": f"{aws_path}/embeddings-stefan-it/lm-eu-opus-large-backward-v0.1.pt",
"eu-v0-forward": f"{aws_path}/embeddings-v0.4/lm-eu-large-forward-v0.1.pt",
"eu-v0-backward": f"{aws_path}/embeddings-v0.4/lm-eu-large-backward-v0.1.pt",
# Persian
"fa-forward": f"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-forward-v0.1.pt",
"fa-backward": f"{aws_path}/embeddings-stefan-it/lm-fa-opus-large-backward-v0.1.pt",
# Finnish
"fi-forward": f"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-forward-v0.1.pt",
"fi-backward": f"{aws_path}/embeddings-stefan-it/lm-fi-opus-large-backward-v0.1.pt",
# French
"fr-forward": f"{aws_path}/embeddings/lm-fr-charlm-forward.pt",
"fr-backward": f"{aws_path}/embeddings/lm-fr-charlm-backward.pt",
# Hebrew
"he-forward": f"{aws_path}/embeddings-stefan-it/lm-he-opus-large-forward-v0.1.pt",
"he-backward": f"{aws_path}/embeddings-stefan-it/lm-he-opus-large-backward-v0.1.pt",
# Hindi
"hi-forward": f"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-forward-v0.1.pt",
"hi-backward": f"{aws_path}/embeddings-stefan-it/lm-hi-opus-large-backward-v0.1.pt",
# Croatian
"hr-forward": f"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-forward-v0.1.pt",
"hr-backward": f"{aws_path}/embeddings-stefan-it/lm-hr-opus-large-backward-v0.1.pt",
# Indonesian
"id-forward": f"{aws_path}/embeddings-stefan-it/lm-id-opus-large-forward-v0.1.pt",
"id-backward": f"{aws_path}/embeddings-stefan-it/lm-id-opus-large-backward-v0.1.pt",
# Italian
"it-forward": f"{aws_path}/embeddings-stefan-it/lm-it-opus-large-forward-v0.1.pt",
"it-backward": f"{aws_path}/embeddings-stefan-it/lm-it-opus-large-backward-v0.1.pt",
# Japanese
"ja-forward": f"{aws_path}/embeddings-v0.4.1/lm__char-forward__ja-wikipedia-3GB/japanese-forward.pt",
"ja-backward": f"{aws_path}/embeddings-v0.4.1/lm__char-backward__ja-wikipedia-3GB/japanese-backward.pt",
# Dutch
"nl-forward": f"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-forward-v0.1.pt",
"nl-backward": f"{aws_path}/embeddings-stefan-it/lm-nl-opus-large-backward-v0.1.pt",
"nl-v0-forward": f"{aws_path}/embeddings-v0.4/lm-nl-large-forward-v0.1.pt",
"nl-v0-backward": f"{aws_path}/embeddings-v0.4/lm-nl-large-backward-v0.1.pt",
# Norwegian
"no-forward": f"{aws_path}/embeddings-stefan-it/lm-no-opus-large-forward-v0.1.pt",
"no-backward": f"{aws_path}/embeddings-stefan-it/lm-no-opus-large-backward-v0.1.pt",
# Polish
"pl-forward": f"{aws_path}/embeddings/lm-polish-forward-v0.2.pt",
"pl-backward": f"{aws_path}/embeddings/lm-polish-backward-v0.2.pt",
"pl-opus-forward": f"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-forward-v0.1.pt",
"pl-opus-backward": f"{aws_path}/embeddings-stefan-it/lm-pl-opus-large-backward-v0.1.pt",
# Portuguese
"pt-forward": f"{aws_path}/embeddings-v0.4/lm-pt-forward.pt",
"pt-backward": f"{aws_path}/embeddings-v0.4/lm-pt-backward.pt",
# Pubmed
"pubmed-forward": f"{aws_path}/embeddings-v0.4.1/pubmed-2015-fw-lm.pt",
"pubmed-backward": f"{aws_path}/embeddings-v0.4.1/pubmed-2015-bw-lm.pt",
# Slovenian
"sl-forward": f"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-forward-v0.1.pt",
"sl-backward": f"{aws_path}/embeddings-stefan-it/lm-sl-opus-large-backward-v0.1.pt",
"sl-v0-forward": f"{aws_path}/embeddings-v0.3/lm-sl-large-forward-v0.1.pt",
"sl-v0-backward": f"{aws_path}/embeddings-v0.3/lm-sl-large-backward-v0.1.pt",
# Swedish
"sv-forward": f"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-forward-v0.1.pt",
"sv-backward": f"{aws_path}/embeddings-stefan-it/lm-sv-opus-large-backward-v0.1.pt",
"sv-v0-forward": f"{aws_path}/embeddings-v0.4/lm-sv-large-forward-v0.1.pt",
"sv-v0-backward": f"{aws_path}/embeddings-v0.4/lm-sv-large-backward-v0.1.pt",
}
# load model if in pretrained model map
if model.lower() in self.PRETRAINED_MODEL_ARCHIVE_MAP:
base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[model.lower()]
model = cached_path(base_path, cache_dir=cache_dir)
elif replace_with_language_code(model) in self.PRETRAINED_MODEL_ARCHIVE_MAP:
base_path = self.PRETRAINED_MODEL_ARCHIVE_MAP[
replace_with_language_code(model)
]
model = cached_path(base_path, cache_dir=cache_dir)
elif not Path(model).exists():
raise ValueError(
f'The given model "{model}" is not available or is not a valid path.'
)
self.name = str(model)
self.static_embeddings = True
from flair.models import LanguageModel
self.lm = LanguageModel.load_language_model(model)
self.is_forward_lm: bool = self.lm.is_forward_lm
self.chars_per_chunk: int = chars_per_chunk
# initialize cache if use_cache set
self.cache = None
if use_cache:
cache_path = (
Path(f"{self.name}-tmp-cache.sqllite")
if not cache_directory
else cache_directory / f"{self.name}-tmp-cache.sqllite"
)
from sqlitedict import SqliteDict
self.cache = SqliteDict(str(cache_path), autocommit=True)
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
# set to eval mode
self.eval()
def train(self, mode=True):
pass
def __getstate__(self):
# Copy the object's state from self.__dict__ which contains
# all our instance attributes. Always use the dict.copy()
# method to avoid modifying the original state.
state = self.__dict__.copy()
# Remove the unpicklable entries.
state["cache"] = None
return state
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# make compatible with serialized models
if "chars_per_chunk" not in self.__dict__:
self.chars_per_chunk = 512
# if cache is used, try setting embeddings from cache first
if "cache" in self.__dict__ and self.cache is not None:
# try populating embeddings from cache
all_embeddings_retrieved_from_cache: bool = True
for sentence in sentences:
key = sentence.to_tokenized_string()
embeddings = self.cache.get(key)
if not embeddings:
all_embeddings_retrieved_from_cache = False
break
else:
for token, embedding in zip(sentence, embeddings):
token.set_embedding(self.name, torch.FloatTensor(embedding))
if all_embeddings_retrieved_from_cache:
return sentences
with torch.no_grad():
# if this is not possible, use LM to generate embedding. First, get text sentences
text_sentences = [sentence.to_tokenized_string() for sentence in sentences]
longest_character_sequence_in_batch: int = len(max(text_sentences, key=len))
# pad strings with whitespaces to longest sentence
sentences_padded: List[str] = []
append_padded_sentence = sentences_padded.append
start_marker = "\n"
end_marker = " "
extra_offset = len(start_marker)
for sentence_text in text_sentences:
pad_by = longest_character_sequence_in_batch - len(sentence_text)
if self.is_forward_lm:
padded = "{}{}{}{}".format(
start_marker, sentence_text, end_marker, pad_by * " "
)
append_padded_sentence(padded)
else:
padded = "{}{}{}{}".format(
start_marker, sentence_text[::-1], end_marker, pad_by * " "
)
append_padded_sentence(padded)
# get hidden states from language model
all_hidden_states_in_lm = self.lm.get_representation(
sentences_padded, self.chars_per_chunk
)
# take first or last hidden states from language model as word representation
for i, sentence in enumerate(sentences):
sentence_text = sentence.to_tokenized_string()
offset_forward: int = extra_offset
offset_backward: int = len(sentence_text) + extra_offset
for token in sentence.tokens:
offset_forward += len(token.text)
if self.is_forward_lm:
offset = offset_forward
else:
offset = offset_backward
embedding = all_hidden_states_in_lm[offset, i, :]
# if self.tokenized_lm or token.whitespace_after:
offset_forward += 1
offset_backward -= 1
offset_backward -= len(token.text)
token.set_embedding(self.name, embedding.clone().detach())
all_hidden_states_in_lm = None
if "cache" in self.__dict__ and self.cache is not None:
for sentence in sentences:
self.cache[sentence.to_tokenized_string()] = [
token._embeddings[self.name].tolist() for token in sentence
]
return sentences
def __str__(self):
return self.name
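# Usage sketch: FlairEmbeddings are usually used as a forward/backward pair and combined with
# classic word embeddings via StackedEmbeddings for sequence labeling.
#
#   from flair.data import Sentence
#   stacked = StackedEmbeddings([
#       FlairEmbeddings("news-forward"),
#       FlairEmbeddings("news-backward"),
#   ])
#   stacked.embed(Sentence("hello world"))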
class PooledFlairEmbeddings(TokenEmbeddings):
def __init__(
self,
contextual_embeddings: Union[str, FlairEmbeddings],
pooling: str = "min",
only_capitalized: bool = False,
**kwargs,
):
super().__init__()
# use the character language model embeddings as basis
if type(contextual_embeddings) is str:
self.context_embeddings: FlairEmbeddings = FlairEmbeddings(
contextual_embeddings, **kwargs
)
else:
self.context_embeddings: FlairEmbeddings = contextual_embeddings
# length is twice the original character LM embedding length
        self.__embedding_length = self.context_embeddings.embedding_length * 2
self.name = self.context_embeddings.name + "-context"
# these fields are for the embedding memory
self.word_embeddings = {}
self.word_count = {}
# whether to add only capitalized words to memory (faster runtime and lower memory consumption)
self.only_capitalized = only_capitalized
# we re-compute embeddings dynamically at each epoch
self.static_embeddings = False
# set the memory method
self.pooling = pooling
if pooling == "mean":
self.aggregate_op = torch.add
elif pooling == "fade":
self.aggregate_op = torch.add
elif pooling == "max":
self.aggregate_op = torch.max
elif pooling == "min":
self.aggregate_op = torch.min
def train(self, mode=True):
super().train(mode=mode)
if mode:
# memory is wiped each time we do a training run
print("train mode resetting embeddings")
self.word_embeddings = {}
self.word_count = {}
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
self.context_embeddings.embed(sentences)
# if we keep a pooling, it needs to be updated continuously
for sentence in sentences:
for token in sentence.tokens:
# update embedding
local_embedding = token._embeddings[self.context_embeddings.name]
local_embedding = local_embedding.to(flair.device)
if token.text[0].isupper() or not self.only_capitalized:
if token.text not in self.word_embeddings:
self.word_embeddings[token.text] = local_embedding
self.word_count[token.text] = 1
else:
aggregated_embedding = self.aggregate_op(
self.word_embeddings[token.text], local_embedding
)
if self.pooling == "fade":
aggregated_embedding /= 2
self.word_embeddings[token.text] = aggregated_embedding
self.word_count[token.text] += 1
# add embeddings after updating
for sentence in sentences:
for token in sentence.tokens:
if token.text in self.word_embeddings:
base = (
self.word_embeddings[token.text] / self.word_count[token.text]
if self.pooling == "mean"
else self.word_embeddings[token.text]
)
else:
base = token._embeddings[self.context_embeddings.name]
token.set_embedding(self.name, base)
return sentences
    @property
    def embedding_length(self) -> int:
        return self.__embedding_length
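# Usage sketch: PooledFlairEmbeddings keeps a memory of every contextual embedding seen for a
# word and uses the pooled ("min"/"max"/"mean"/"fade") memory vector as the word embedding;
# the memory is wiped whenever train(True) is called.
#
#   from flair.data import Sentence
#   embedding = PooledFlairEmbeddings("news-forward", pooling="min")
#   embedding.embed(Sentence("hello world"))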
class BertEmbeddings(TokenEmbeddings):
def __init__(
self,
bert_model_or_path: str = "bert-base-uncased",
layers: str = "-1,-2,-3,-4",
pooling_operation: str = "first",
):
"""
Bidirectional transformer embeddings of words, as proposed in Devlin et al., 2018.
        :param bert_model_or_path: name of a pre-trained BERT model (e.g. 'bert-base-uncased') or directory path containing a custom model, configuration file
and vocab file (names of three files should be - bert_config.json, pytorch_model.bin/model.chkpt, vocab.txt)
:param layers: string indicating which layers to take for embedding
:param pooling_operation: how to get from token piece embeddings to token embedding. Either pool them and take
        the average ('mean') or use the first word piece embedding as token embedding ('first')
"""
super().__init__()
self.tokenizer = BertTokenizer.from_pretrained(bert_model_or_path)
self.model = BertModel.from_pretrained(bert_model_or_path)
self.layer_indexes = [int(x) for x in layers.split(",")]
self.pooling_operation = pooling_operation
self.name = str(bert_model_or_path)
self.static_embeddings = True
class BertInputFeatures(object):
"""Private helper class for holding BERT-formatted features"""
def __init__(
self,
unique_id,
tokens,
input_ids,
input_mask,
input_type_ids,
token_subtoken_count,
):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
self.token_subtoken_count = token_subtoken_count
def _convert_sentences_to_features(
self, sentences, max_sequence_length: int
    ) -> List["BertEmbeddings.BertInputFeatures"]:
max_sequence_length = max_sequence_length + 2
features: List[BertEmbeddings.BertInputFeatures] = []
for (sentence_index, sentence) in enumerate(sentences):
bert_tokenization: List[str] = []
token_subtoken_count: Dict[int, int] = {}
for token in sentence:
subtokens = self.tokenizer.tokenize(token.text)
bert_tokenization.extend(subtokens)
token_subtoken_count[token.idx] = len(subtokens)
if len(bert_tokenization) > max_sequence_length - 2:
bert_tokenization = bert_tokenization[0 : (max_sequence_length - 2)]
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in bert_tokenization:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_sequence_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
features.append(
BertEmbeddings.BertInputFeatures(
unique_id=sentence_index,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids,
token_subtoken_count=token_subtoken_count,
)
)
return features
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
"""Add embeddings to all words in a list of sentences. If embeddings are already added,
updates only if embeddings are non-static."""
# first, find longest sentence in batch
longest_sentence_in_batch: int = len(
max(
[
self.tokenizer.tokenize(sentence.to_tokenized_string())
for sentence in sentences
],
key=len,
)
)
# prepare id maps for BERT model
features = self._convert_sentences_to_features(
sentences, longest_sentence_in_batch
)
all_input_ids = torch.LongTensor([f.input_ids for f in features]).to(
flair.device
)
all_input_masks = torch.LongTensor([f.input_mask for f in features]).to(
flair.device
)
# put encoded batch through BERT model to get all hidden states of all encoder layers
self.model.to(flair.device)
self.model.eval()
all_encoder_layers, _ = self.model(
all_input_ids, token_type_ids=None, attention_mask=all_input_masks
)
with torch.no_grad():
for sentence_index, sentence in enumerate(sentences):
feature = features[sentence_index]
# get aggregated embeddings for each BERT-subtoken in sentence
subtoken_embeddings = []
for token_index, _ in enumerate(feature.tokens):
all_layers = []
for layer_index in self.layer_indexes:
layer_output = (
all_encoder_layers[int(layer_index)]
.detach()
.cpu()[sentence_index]
)
all_layers.append(layer_output[token_index])
subtoken_embeddings.append(torch.cat(all_layers))
# get the current sentence object
token_idx = 0
for token in sentence:
# add concatenated embedding to sentence
token_idx += 1
if self.pooling_operation == "first":
# use first subword embedding if pooling operation is 'first'
token.set_embedding(self.name, subtoken_embeddings[token_idx])
else:
# otherwise, do a mean over all subwords in token
embeddings = subtoken_embeddings[
token_idx : token_idx
+ feature.token_subtoken_count[token.idx]
]
embeddings = [
embedding.unsqueeze(0) for embedding in embeddings
]
mean = torch.mean(torch.cat(embeddings, dim=0), dim=0)
token.set_embedding(self.name, mean)
token_idx += feature.token_subtoken_count[token.idx] - 1
return sentences
@property
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
return len(self.layer_indexes) * self.model.config.hidden_size
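# Usage sketch: BertEmbeddings concatenates the selected encoder layers per (sub-)token, so
# embedding_length == len(layers) * hidden_size (e.g. 4 * 768 = 3072 for "bert-base-uncased"
# with the default layers "-1,-2,-3,-4").
#
#   from flair.data import Sentence
#   embedding = BertEmbeddings("bert-base-uncased", layers="-1,-2,-3,-4", pooling_operation="first")
#   embedding.embed(Sentence("hello world"))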
class CharLMEmbeddings(TokenEmbeddings):
"""Contextual string embeddings of words, as proposed in Akbik et al., 2018. """
@deprecated(version="0.4", reason="Use 'FlairEmbeddings' instead.")
def __init__(
self,
model: str,
detach: bool = True,
use_cache: bool = False,
cache_directory: Path = None,
):
"""
initializes contextual string embeddings using a character-level language model.
:param model: model string, one of 'news-forward', 'news-backward', 'news-forward-fast', 'news-backward-fast',
'mix-forward', 'mix-backward', 'german-forward', 'german-backward', 'polish-backward', 'polish-forward'
depending on which character language model is desired.
:param detach: if set to False, the gradient will propagate into the language model. this dramatically slows down
training and often leads to worse results, so not recommended.
:param use_cache: if set to False, will not write embeddings to file for later retrieval. this saves disk space but will
not allow re-use of once computed embeddings that do not fit into memory
:param cache_directory: if cache_directory is not set, the cache will be written to ~/.flair/embeddings. otherwise the cache
is written to the provided directory.
"""
super().__init__()
cache_dir = Path("embeddings")
# multilingual forward (English, German, French, Italian, Dutch, Polish)
if model.lower() == "multi-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# multilingual backward (English, German, French, Italian, Dutch, Polish)
elif model.lower() == "multi-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-multi-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-forward
elif model.lower() == "news-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-backward
elif model.lower() == "news-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-forward
elif model.lower() == "news-forward-fast":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-forward-1024-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# news-english-backward
elif model.lower() == "news-backward-fast":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-news-english-backward-1024-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-english-forward
elif model.lower() == "mix-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-forward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-english-backward
elif model.lower() == "mix-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-english-backward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-german-forward
elif model.lower() == "german-forward" or model.lower() == "de-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-forward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# mix-german-backward
elif model.lower() == "german-backward" or model.lower() == "de-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-mix-german-backward-v0.2rc.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# common crawl Polish forward
elif model.lower() == "polish-forward" or model.lower() == "pl-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-forward-v0.2.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# common crawl Polish backward
elif model.lower() == "polish-backward" or model.lower() == "pl-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-polish-backward-v0.2.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Slovenian forward
elif model.lower() == "slovenian-forward" or model.lower() == "sl-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Slovenian backward
elif model.lower() == "slovenian-backward" or model.lower() == "sl-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-sl-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Bulgarian forward
elif model.lower() == "bulgarian-forward" or model.lower() == "bg-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Bulgarian backward
elif model.lower() == "bulgarian-backward" or model.lower() == "bg-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.3/lm-bg-small-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Dutch forward
elif model.lower() == "dutch-forward" or model.lower() == "nl-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Dutch backward
elif model.lower() == "dutch-backward" or model.lower() == "nl-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-nl-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Swedish forward
elif model.lower() == "swedish-forward" or model.lower() == "sv-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Swedish backward
elif model.lower() == "swedish-backward" or model.lower() == "sv-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-sv-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# French forward
elif model.lower() == "french-forward" or model.lower() == "fr-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-forward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# French backward
elif model.lower() == "french-backward" or model.lower() == "fr-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings/lm-fr-charlm-backward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Czech forward
elif model.lower() == "czech-forward" or model.lower() == "cs-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-forward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Czech backward
elif model.lower() == "czech-backward" or model.lower() == "cs-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-cs-large-backward-v0.1.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Portuguese forward
elif model.lower() == "portuguese-forward" or model.lower() == "pt-forward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-forward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
# Portuguese backward
elif model.lower() == "portuguese-backward" or model.lower() == "pt-backward":
base_path = "https://s3.eu-central-1.amazonaws.com/alan-nlp/resources/embeddings-v0.4/lm-pt-backward.pt"
model = cached_path(base_path, cache_dir=cache_dir)
elif not Path(model).exists():
raise ValueError(
f'The given model "{model}" is not available or is not a valid path.'
)
self.name = str(model)
self.static_embeddings = detach
from flair.models import LanguageModel
self.lm = LanguageModel.load_language_model(model)
self.detach = detach
self.is_forward_lm: bool = self.lm.is_forward_lm
# initialize cache if use_cache set
self.cache = None
if use_cache:
cache_path = (
Path(f"{self.name}-tmp-cache.sqllite")
if not cache_directory
else cache_directory / f"{self.name}-tmp-cache.sqllite"
)
from sqlitedict import SqliteDict
self.cache = SqliteDict(str(cache_path), autocommit=True)
# embed a dummy sentence to determine embedding_length
dummy_sentence: Sentence = Sentence()
dummy_sentence.add_token(Token("hello"))
embedded_dummy = self.embed(dummy_sentence)
self.__embedding_length: int = len(
embedded_dummy[0].get_token(1).get_embedding()
)
# set to eval mode
self.eval()
def train(self, mode=True):
pass
def __getstate__(self):
# Copy the object's state from self.__dict__ which contains
# all our instance attributes. Always use the dict.copy()
# method to avoid modifying the original state.
state = self.__dict__.copy()
# Remove the unpicklable entries.
state["cache"] = None
return state
@property
def embedding_length(self) -> int:
return self.__embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
# if cache is used, try setting embeddings from cache first
if "cache" in self.__dict__ and self.cache is not None:
# try populating embeddings from cache
all_embeddings_retrieved_from_cache: bool = True
for sentence in sentences:
key = sentence.to_tokenized_string()
embeddings = self.cache.get(key)
if not embeddings:
all_embeddings_retrieved_from_cache = False
break
else:
for token, embedding in zip(sentence, embeddings):
token.set_embedding(self.name, torch.FloatTensor(embedding))
if all_embeddings_retrieved_from_cache:
return sentences
# if this is not possible, use LM to generate embedding. First, get text sentences
text_sentences = [sentence.to_tokenized_string() for sentence in sentences]
longest_character_sequence_in_batch: int = len(max(text_sentences, key=len))
# pad strings with whitespaces to longest sentence
sentences_padded: List[str] = []
append_padded_sentence = sentences_padded.append
end_marker = " "
extra_offset = 1
for sentence_text in text_sentences:
pad_by = longest_character_sequence_in_batch - len(sentence_text)
if self.is_forward_lm:
padded = "\n{}{}{}".format(sentence_text, end_marker, pad_by * " ")
append_padded_sentence(padded)
else:
padded = "\n{}{}{}".format(
sentence_text[::-1], end_marker, pad_by * " "
)
append_padded_sentence(padded)
# get hidden states from language model
all_hidden_states_in_lm = self.lm.get_representation(sentences_padded)
# take first or last hidden states from language model as word representation
for i, sentence in enumerate(sentences):
sentence_text = sentence.to_tokenized_string()
offset_forward: int = extra_offset
offset_backward: int = len(sentence_text) + extra_offset
for token in sentence.tokens:
offset_forward += len(token.text)
if self.is_forward_lm:
offset = offset_forward
else:
offset = offset_backward
embedding = all_hidden_states_in_lm[offset, i, :]
# if self.tokenized_lm or token.whitespace_after:
offset_forward += 1
offset_backward -= 1
offset_backward -= len(token.text)
token.set_embedding(self.name, embedding)
if "cache" in self.__dict__ and self.cache is not None:
for sentence in sentences:
self.cache[sentence.to_tokenized_string()] = [
token._embeddings[self.name].tolist() for token in sentence
]
return sentences
def __str__(self):
return self.name
class DocumentMeanEmbeddings(DocumentEmbeddings):
@deprecated(
version="0.3.1",
reason="The functionality of this class is moved to 'DocumentPoolEmbeddings'",
)
def __init__(self, token_embeddings: List[TokenEmbeddings]):
"""The constructor takes a list of embeddings to be combined."""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(
embeddings=token_embeddings
)
self.name: str = "document_mean"
self.__embedding_length: int = self.embeddings.embedding_length
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates
only if embeddings are non-static."""
everything_embedded: bool = True
# if only one sentence is passed, convert to list of sentence
if type(sentences) is Sentence:
sentences = [sentences]
for sentence in sentences:
if self.name not in sentence._embeddings.keys():
everything_embedded = False
if not everything_embedded:
self.embeddings.embed(sentences)
for sentence in sentences:
word_embeddings = []
for token in sentence.tokens:
word_embeddings.append(token.get_embedding().unsqueeze(0))
word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)
mean_embedding = torch.mean(word_embeddings, 0)
sentence.set_embedding(self.name, mean_embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
class DocumentPoolEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
fine_tune_mode="linear",
pooling: str = "mean",
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
        :param fine_tune_mode: 'linear' or 'nonlinear' adds a trainable (non-)linear map on top of the stacked embeddings; any other value disables fine-tuning
        :param pooling: a string which can be any value from ['mean', 'max', 'min']
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.__embedding_length = self.embeddings.embedding_length
# optional fine-tuning on top of embedding layer
self.fine_tune_mode = fine_tune_mode
if self.fine_tune_mode in ["nonlinear", "linear"]:
self.embedding_flex = torch.nn.Linear(
self.embedding_length, self.embedding_length, bias=False
)
self.embedding_flex.weight.data.copy_(torch.eye(self.embedding_length))
if self.fine_tune_mode in ["nonlinear"]:
            self.embedding_flex_nonlinear = torch.nn.ReLU()  # ReLU takes no size argument
self.embedding_flex_nonlinear_map = torch.nn.Linear(
self.embedding_length, self.embedding_length
)
self.__embedding_length: int = self.embeddings.embedding_length
self.to(flair.device)
self.pooling = pooling
if self.pooling == "mean":
self.pool_op = torch.mean
elif pooling == "max":
self.pool_op = torch.max
elif pooling == "min":
self.pool_op = torch.min
else:
raise ValueError(f"Pooling operation for {self.mode!r} is not defined")
self.name: str = f"document_{self.pooling}"
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to every sentence in the given list of sentences. If embeddings are already added, updates
only if embeddings are non-static."""
# if only one sentence is passed, convert to list of sentence
if isinstance(sentences, Sentence):
sentences = [sentences]
self.embeddings.embed(sentences)
for sentence in sentences:
word_embeddings = []
for token in sentence.tokens:
word_embeddings.append(token.get_embedding().unsqueeze(0))
word_embeddings = torch.cat(word_embeddings, dim=0).to(flair.device)
if self.fine_tune_mode in ["nonlinear", "linear"]:
word_embeddings = self.embedding_flex(word_embeddings)
if self.fine_tune_mode in ["nonlinear"]:
word_embeddings = self.embedding_flex_nonlinear(word_embeddings)
word_embeddings = self.embedding_flex_nonlinear_map(word_embeddings)
if self.pooling == "mean":
pooled_embedding = self.pool_op(word_embeddings, 0)
else:
pooled_embedding, _ = self.pool_op(word_embeddings, 0)
sentence.set_embedding(self.name, pooled_embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
def extra_repr(self):
return f"fine_tune_mode={self.fine_tune_mode}, pooling={self.pooling}"
class DocumentRNNEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
hidden_size=128,
rnn_layers=1,
reproject_words: bool = True,
reproject_words_dimension: int = None,
bidirectional: bool = False,
dropout: float = 0.5,
word_dropout: float = 0.0,
locked_dropout: float = 0.0,
rnn_type="GRU",
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param hidden_size: the number of hidden states in the rnn
:param rnn_layers: the number of layers for the rnn
:param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
layer before putting them into the rnn or not
:param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
dimension as before will be taken.
:param bidirectional: boolean value, indicating whether to use a bidirectional rnn or not
:param dropout: the dropout value to be used
:param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
:param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
:param rnn_type: 'GRU' or 'LSTM'
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.rnn_type = rnn_type
self.reproject_words = reproject_words
self.bidirectional = bidirectional
self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
self.static_embeddings = False
self.__embedding_length: int = hidden_size
if self.bidirectional:
self.__embedding_length *= 4
self.embeddings_dimension: int = self.length_of_all_token_embeddings
if self.reproject_words and reproject_words_dimension is not None:
self.embeddings_dimension = reproject_words_dimension
self.word_reprojection_map = torch.nn.Linear(
self.length_of_all_token_embeddings, self.embeddings_dimension
)
# bidirectional RNN on top of embedding layer
if rnn_type == "LSTM":
self.rnn = torch.nn.LSTM(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
else:
self.rnn = torch.nn.GRU(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
self.name = "document_" + self.rnn._get_name()
# dropouts
if locked_dropout > 0.0:
self.dropout: torch.nn.Module = LockedDropout(locked_dropout)
else:
self.dropout = torch.nn.Dropout(dropout)
self.use_word_dropout: bool = word_dropout > 0.0
if self.use_word_dropout:
self.word_dropout = WordDropout(word_dropout)
torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
only if embeddings are non-static."""
if type(sentences) is Sentence:
sentences = [sentences]
self.rnn.zero_grad()
sentences.sort(key=lambda x: len(x), reverse=True)
self.embeddings.embed(sentences)
# first, sort sentences by number of tokens
longest_token_sequence_in_batch: int = len(sentences[0])
all_sentence_tensors = []
lengths: List[int] = []
# go through each sentence in batch
for i, sentence in enumerate(sentences):
lengths.append(len(sentence.tokens))
word_embeddings = []
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
word_embeddings.append(token.get_embedding().unsqueeze(0))
# PADDING: pad shorter sentences out
for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):
word_embeddings.append(
torch.zeros(
self.length_of_all_token_embeddings, dtype=torch.float
).unsqueeze(0)
)
word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)
sentence_states = word_embeddings_tensor
# ADD TO SENTENCE LIST: add the representation
all_sentence_tensors.append(sentence_states.unsqueeze(1))
# --------------------------------------------------------------------
# GET REPRESENTATION FOR ENTIRE BATCH
# --------------------------------------------------------------------
sentence_tensor = torch.cat(all_sentence_tensors, 1)
# --------------------------------------------------------------------
# FF PART
# --------------------------------------------------------------------
# use word dropout if set
if self.use_word_dropout:
sentence_tensor = self.word_dropout(sentence_tensor)
if self.reproject_words:
sentence_tensor = self.word_reprojection_map(sentence_tensor)
sentence_tensor = self.dropout(sentence_tensor)
packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)
self.rnn.flatten_parameters()
rnn_out, hidden = self.rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(rnn_out)
outputs = self.dropout(outputs)
# --------------------------------------------------------------------
# EXTRACT EMBEDDINGS FROM RNN
# --------------------------------------------------------------------
for sentence_no, length in enumerate(lengths):
last_rep = outputs[length - 1, sentence_no]
embedding = last_rep
if self.bidirectional:
first_rep = outputs[0, sentence_no]
embedding = torch.cat([first_rep, last_rep], 0)
sentence = sentences[sentence_no]
sentence.set_embedding(self.name, embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
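# Usage sketch: DocumentRNNEmbeddings runs a GRU/LSTM over the token embeddings and uses the
# final state (plus the first state if bidirectional) as the sentence embedding, which is
# trained together with the downstream task. The word embedding below is an assumption.
#
#   from flair.data import Sentence
#   document_embedding = DocumentRNNEmbeddings([WordEmbeddings("glove")], hidden_size=128)
#   sentence = Sentence("hello world")
#   document_embedding.embed(sentence)
#   vec = sentence.get_embedding()   # hidden_size-dimensional (larger if bidirectional)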
@deprecated(
version="0.4",
reason="The functionality of this class is moved to 'DocumentRNNEmbeddings'",
)
class DocumentLSTMEmbeddings(DocumentEmbeddings):
def __init__(
self,
embeddings: List[TokenEmbeddings],
hidden_size=128,
rnn_layers=1,
reproject_words: bool = True,
reproject_words_dimension: int = None,
bidirectional: bool = False,
dropout: float = 0.5,
word_dropout: float = 0.0,
locked_dropout: float = 0.0,
):
"""The constructor takes a list of embeddings to be combined.
:param embeddings: a list of token embeddings
:param hidden_size: the number of hidden states in the lstm
:param rnn_layers: the number of layers for the lstm
:param reproject_words: boolean value, indicating whether to reproject the token embeddings in a separate linear
layer before putting them into the lstm or not
:param reproject_words_dimension: output dimension of reprojecting token embeddings. If None the same output
dimension as before will be taken.
:param bidirectional: boolean value, indicating whether to use a bidirectional lstm or not
:param dropout: the dropout value to be used
:param word_dropout: the word dropout value to be used, if 0.0 word dropout is not used
:param locked_dropout: the locked dropout value to be used, if 0.0 locked dropout is not used
"""
super().__init__()
self.embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embeddings)
self.reproject_words = reproject_words
self.bidirectional = bidirectional
self.length_of_all_token_embeddings: int = self.embeddings.embedding_length
self.name = "document_lstm"
self.static_embeddings = False
self.__embedding_length: int = hidden_size
if self.bidirectional:
self.__embedding_length *= 4
self.embeddings_dimension: int = self.length_of_all_token_embeddings
if self.reproject_words and reproject_words_dimension is not None:
self.embeddings_dimension = reproject_words_dimension
# bidirectional LSTM on top of embedding layer
self.word_reprojection_map = torch.nn.Linear(
self.length_of_all_token_embeddings, self.embeddings_dimension
)
self.rnn = torch.nn.GRU(
self.embeddings_dimension,
hidden_size,
num_layers=rnn_layers,
bidirectional=self.bidirectional,
)
# dropouts
if locked_dropout > 0.0:
self.dropout: torch.nn.Module = LockedDropout(locked_dropout)
else:
self.dropout = torch.nn.Dropout(dropout)
self.use_word_dropout: bool = word_dropout > 0.0
if self.use_word_dropout:
self.word_dropout = WordDropout(word_dropout)
torch.nn.init.xavier_uniform_(self.word_reprojection_map.weight)
self.to(flair.device)
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[List[Sentence], Sentence]):
"""Add embeddings to all sentences in the given list of sentences. If embeddings are already added, update
only if embeddings are non-static."""
if type(sentences) is Sentence:
sentences = [sentences]
self.rnn.zero_grad()
sentences.sort(key=lambda x: len(x), reverse=True)
self.embeddings.embed(sentences)
        # sentences are sorted longest-first, so the first one gives the longest token sequence
longest_token_sequence_in_batch: int = len(sentences[0])
all_sentence_tensors = []
lengths: List[int] = []
# go through each sentence in batch
for i, sentence in enumerate(sentences):
lengths.append(len(sentence.tokens))
word_embeddings = []
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
word_embeddings.append(token.get_embedding().unsqueeze(0))
# PADDING: pad shorter sentences out
for add in range(longest_token_sequence_in_batch - len(sentence.tokens)):
word_embeddings.append(
torch.zeros(
self.length_of_all_token_embeddings, dtype=torch.float
).unsqueeze(0)
)
word_embeddings_tensor = torch.cat(word_embeddings, 0).to(flair.device)
sentence_states = word_embeddings_tensor
# ADD TO SENTENCE LIST: add the representation
all_sentence_tensors.append(sentence_states.unsqueeze(1))
# --------------------------------------------------------------------
# GET REPRESENTATION FOR ENTIRE BATCH
# --------------------------------------------------------------------
sentence_tensor = torch.cat(all_sentence_tensors, 1)
# --------------------------------------------------------------------
# FF PART
# --------------------------------------------------------------------
# use word dropout if set
if self.use_word_dropout:
sentence_tensor = self.word_dropout(sentence_tensor)
if self.reproject_words:
sentence_tensor = self.word_reprojection_map(sentence_tensor)
sentence_tensor = self.dropout(sentence_tensor)
packed = torch.nn.utils.rnn.pack_padded_sequence(sentence_tensor, lengths)
self.rnn.flatten_parameters()
lstm_out, hidden = self.rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)
outputs = self.dropout(outputs)
# --------------------------------------------------------------------
# EXTRACT EMBEDDINGS FROM LSTM
# --------------------------------------------------------------------
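        # the document embedding is the RNN output at the last real token; for a bidirectional RNN the output at the first token (end of the backward pass) is concatenated as well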
for sentence_no, length in enumerate(lengths):
last_rep = outputs[length - 1, sentence_no]
embedding = last_rep
if self.bidirectional:
first_rep = outputs[0, sentence_no]
embedding = torch.cat([first_rep, last_rep], 0)
sentence = sentences[sentence_no]
sentence.set_embedding(self.name, embedding)
def _add_embeddings_internal(self, sentences: List[Sentence]):
pass
class DocumentLMEmbeddings(DocumentEmbeddings):
def __init__(self, flair_embeddings: List[FlairEmbeddings], detach: bool = True):
super().__init__()
self.embeddings = flair_embeddings
self.name = "document_lm"
self.static_embeddings = detach
self.detach = detach
self._embedding_length: int = sum(
embedding.embedding_length for embedding in flair_embeddings
)
@property
def embedding_length(self) -> int:
return self._embedding_length
def _add_embeddings_internal(self, sentences: List[Sentence]):
if type(sentences) is Sentence:
sentences = [sentences]
for embedding in self.embeddings:
embedding.embed(sentences)
# iterate over sentences
for sentence in sentences:
                # if it's a forward LM, take the last state
if embedding.is_forward_lm:
sentence.set_embedding(
embedding.name,
sentence[len(sentence) - 1]._embeddings[embedding.name],
)
else:
sentence.set_embedding(
embedding.name, sentence[0]._embeddings[embedding.name]
)
return sentences
class NILCEmbeddings(WordEmbeddings):
def __init__(self, embeddings: str, model: str = "skip", size: int = 100):
"""
        Initializes Portuguese classic word embeddings trained by the NILC Lab (http://www.nilc.icmc.usp.br/embeddings).
Constructor downloads required files if not there.
:param embeddings: one of: 'fasttext', 'glove', 'wang2vec' or 'word2vec'
:param model: one of: 'skip' or 'cbow'. This is not applicable to glove.
:param size: one of: 50, 100, 300, 600 or 1000.
"""
base_path = "http://143.107.183.175:22980/download.php?file=embeddings/"
cache_dir = Path("embeddings") / embeddings.lower()
# GLOVE embeddings
if embeddings.lower() == "glove":
cached_path(
f"{base_path}{embeddings}/{embeddings}_s{size}.zip", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}{embeddings}/{embeddings}_s{size}.zip", cache_dir=cache_dir
)
elif embeddings.lower() in ["fasttext", "wang2vec", "word2vec"]:
cached_path(
f"{base_path}{embeddings}/{model}_s{size}.zip", cache_dir=cache_dir
)
embeddings = cached_path(
f"{base_path}{embeddings}/{model}_s{size}.zip", cache_dir=cache_dir
)
elif not Path(embeddings).exists():
raise ValueError(
f'The given embeddings "{embeddings}" is not available or is not a valid path.'
)
self.name: str = str(embeddings)
self.static_embeddings = True
log.info("Reading embeddings from %s" % embeddings)
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load_word2vec_format(
open_inside_zip(str(embeddings), cache_dir=cache_dir)
)
self.__embedding_length: int = self.precomputed_word_embeddings.vector_size
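        # note: super() is called on TokenEmbeddings here, so WordEmbeddings.__init__ is intentionally bypassed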
super(TokenEmbeddings, self).__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def __str__(self):
return self.name
def replace_with_language_code(string: str):
string = string.replace("arabic-", "ar-")
string = string.replace("basque-", "eu-")
string = string.replace("bulgarian-", "bg-")
string = string.replace("croatian-", "hr-")
string = string.replace("czech-", "cs-")
string = string.replace("danish-", "da-")
string = string.replace("dutch-", "nl-")
string = string.replace("farsi-", "fa-")
string = string.replace("persian-", "fa-")
string = string.replace("finnish-", "fi-")
string = string.replace("french-", "fr-")
string = string.replace("german-", "de-")
string = string.replace("hebrew-", "he-")
string = string.replace("hindi-", "hi-")
string = string.replace("indonesian-", "id-")
string = string.replace("italian-", "it-")
string = string.replace("japanese-", "ja-")
string = string.replace("norwegian-", "no")
string = string.replace("polish-", "pl-")
string = string.replace("portuguese-", "pt-")
string = string.replace("slovenian-", "sl-")
string = string.replace("spanish-", "es-")
string = string.replace("swedish-", "sv-")
return string
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.nn.GRU",
"torch.no_grad",
"torch.FloatTensor",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.LongTensor",
"torch.tensor",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.eye",
"torch.mean"
] | 1.0.0 | atakanokan/flair | d33aa6a007384da76d1ae8dac6f4fc61bc652ce7 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""nn.Module with additional great features."""
import collections
import copy
import inspect
import os
import re
import tempfile
from abc import ABC
from argparse import Namespace
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import torch
from torch import ScriptModule, Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
from pytorch_lightning import _logger as log
from pytorch_lightning.core.grads import GradInformation
from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, ModelIO, PRIMITIVE_TYPES
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.utilities import rank_zero_warn, TPU_AVAILABLE
from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import AttributeDict, collect_init_args, get_init_args
if TPU_AVAILABLE:
import torch_xla.core.xla_model as xm
class LightningModule(
ABC,
DeviceDtypeModuleMixin,
GradInformation,
ModelIO,
ModelHooks,
DataHooks,
CheckpointHooks,
Module,
):
# Below is for property support of JIT in PyTorch 1.7
# since none of them is important when using JIT, we are going to ignore them.
__jit_unused_properties__ = [
"datamodule",
"example_input_array",
"hparams",
"hparams_initial",
"on_gpu",
"current_epoch",
"global_step",
] + DeviceDtypeModuleMixin.__jit_unused_properties__
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/
# torch/nn/modules/module.py#L227)
torch._C._log_api_usage_once(f"lightning.module.{self.__class__.__name__}")
self.exp_save_path = None
self.loaded_optimizer_states_dict = {}
#: Pointer to the trainer object
self.trainer = None
#: Pointer to the logger object
self.logger = None
#: True if using dp
self.use_dp = False
#: True if using ddp
self.use_ddp = False
#: True if using ddp2
self.use_ddp2 = False
# True if on tpu
self.use_tpu = False
#: True if using amp
self.use_amp = False
#: The precision used
self.precision = 32
# optionally can be set by user
self._example_input_array = None
self._datamodule = None
self._results: Optional[Result] = None
self._current_fx_name = ''
self._running_manual_backward = False
self._current_hook_fx_name = None
self._current_dataloader_idx = None
self._automatic_optimization: bool = True
def optimizers(self, use_pl_optimizer: bool = True) -> Union[Optimizer, List[Optimizer], List[LightningOptimizer]]:
if use_pl_optimizer:
opts = list(self.trainer.lightning_optimizers.values())
else:
opts = self.trainer.optimizers
# single optimizer
if isinstance(opts, list) and len(opts) == 1 and isinstance(opts[0], Optimizer):
return opts[0]
# multiple opts
return opts
@property
def example_input_array(self) -> Any:
return self._example_input_array
@property
def current_epoch(self) -> int:
"""The current epoch"""
return self.trainer.current_epoch if self.trainer else 0
@property
def global_step(self) -> int:
"""Total training batches seen across all epochs"""
return self.trainer.global_step if self.trainer else 0
@example_input_array.setter
def example_input_array(self, example: Any) -> None:
self._example_input_array = example
@property
def datamodule(self) -> Any:
return self._datamodule
@datamodule.setter
def datamodule(self, datamodule: Any) -> None:
self._datamodule = datamodule
@property
def on_gpu(self):
"""
True if your model is currently running on GPUs.
Useful to set flags around the LightningModule for different CPU vs GPU behavior.
"""
return self.device.type == "cuda"
@property
def automatic_optimization(self) -> bool:
"""
If False you are responsible for calling .backward, .step, zero_grad.
"""
return self._automatic_optimization
@automatic_optimization.setter
def automatic_optimization(self, automatic_optimization: bool) -> None:
self._automatic_optimization = automatic_optimization
def print(self, *args, **kwargs) -> None:
r"""
Prints only from process 0. Use this in any distributed mode to log only once.
Args:
*args: The thing to print. Will be passed to Python's built-in print function.
**kwargs: Will be passed to Python's built-in print function.
Example:
.. code-block:: python
def forward(self, x):
self.print(x, 'in forward')
"""
if self.trainer.is_global_zero:
print(*args, **kwargs)
def log(
self,
name: str,
value: Any,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
):
"""
Log a key, value
Example::
self.log('train_loss', loss)
The default behavior per hook is as follows
.. csv-table:: ``*`` also applies to the test loop
           :header: "LightningModule Hook", "on_step", "on_epoch", "prog_bar", "logger"
:widths: 20, 10, 10, 10, 10
"training_step", "T", "F", "F", "T"
"training_step_end", "T", "F", "F", "T"
"training_epoch_end", "F", "T", "F", "T"
"validation_step*", "F", "T", "F", "T"
"validation_step_end*", "F", "T", "F", "T"
"validation_epoch_end*", "F", "T", "F", "T"
Args:
name: key name
            value: value to log
prog_bar: if True logs to the progress bar
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs at the training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs at the val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across GPUs/TPUs
sync_dist_group: the ddp group
"""
if self._results is not None:
# in any epoch end can't log step metrics (only epoch metric)
if 'epoch_end' in self._current_fx_name and on_step:
m = f'on_step=True cannot be used on {self._current_fx_name} method'
raise MisconfigurationException(m)
if 'epoch_end' in self._current_fx_name and on_epoch is False:
m = f'on_epoch cannot be False when called from the {self._current_fx_name} method'
raise MisconfigurationException(m)
# add log_dict
# TODO: if logged twice fail with crash
# set the default depending on the fx_name
on_step = self.__auto_choose_log_on_step(on_step)
on_epoch = self.__auto_choose_log_on_epoch(on_epoch)
if self._current_hook_fx_name is not None:
self.trainer.logger_connector.check_logging_in_callbacks(
self._current_hook_fx_name,
on_step=on_step,
on_epoch=on_epoch
)
# make sure user doesn't introduce logic for multi-dataloaders
if "/dataloader_idx_" in name:
raise MisconfigurationException(
f"Logged key: {name} should not contain information about dataloader_idx.")
accelerator = self.trainer.accelerator_backend
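            # the Result object handles reduction, truncated-BPTT padding and syncing across devices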
self._results.log(
name,
value,
prog_bar,
logger,
on_step,
on_epoch,
reduce_fx,
tbptt_reduce_fx,
tbptt_pad_token,
enable_graph,
sync_dist,
sync_dist_op,
sync_dist_group,
accelerator.sync_tensor,
self._current_dataloader_idx,
self.device,
)
def log_dict(
self,
dictionary: dict,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
):
"""
        Log a dictionary of values at once
Example::
values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}
self.log_dict(values)
Args:
dictionary: key value pairs (str, tensors)
            prog_bar: if True logs to the progress bar
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs for training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs for val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across GPUs/TPUs
            sync_dist_group: the ddp group
"""
for k, v in dictionary.items():
self.log(
name=k,
value=v,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
sync_dist=sync_dist,
sync_dist_group=sync_dist_group,
sync_dist_op=sync_dist_op,
tbptt_pad_token=tbptt_pad_token,
tbptt_reduce_fx=tbptt_reduce_fx,
)
def write_prediction(self, name, value, filename='predictions.pt'):
self.trainer.evaluation_loop.predictions._add_prediction(name, value, filename)
def write_prediction_dict(self, predictions_dict, filename='predictions.pt'):
for k, v in predictions_dict.items():
self.write_prediction(k, v, filename)
def __auto_choose_log_on_step(self, on_step):
if on_step is None:
if self._current_fx_name in {'training_step', 'training_step_end'}:
on_step = True
elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',
'evaluation_epoch_end', 'training_epoch_end'}:
on_step = False
else:
on_step = False
return on_step
def __auto_choose_log_on_epoch(self, on_epoch):
if on_epoch is None:
if self._current_fx_name in {'training_step', 'training_step_end'}:
on_epoch = False
elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',
'evaluation_epoch_end', 'training_epoch_end'}:
on_epoch = True
else:
on_epoch = True
return on_epoch
def all_gather(self, tensor: Union[torch.Tensor], group: Optional[Any] = None, sync_grads: bool = False):
r"""
Allows users to call ``self.all_gather()`` from the LightningModule, thus making
        the ``all_gather`` operation accelerator agnostic.
        ``all_gather`` is a function provided by accelerators to gather a tensor from several
        distributed processes.
Args:
tensor: tensor of shape (batch, ...)
group: the process group to gather results from. Defaults to all processes (world)
sync_grads: flag that allows users to synchronize gradients for all_gather op
Return:
A tensor of shape (world_size, batch, ...)
"""
return self.trainer.accelerator_backend.all_gather(tensor, group=group, sync_grads=sync_grads)
def forward(self, *args, **kwargs):
r"""
Same as :meth:`torch.nn.Module.forward()`, however in Lightning you want this to define
the operations you want to use for prediction (i.e.: on a server or as a feature extractor).
Normally you'd call ``self()`` from your :meth:`training_step` method.
This makes it easy to write a complex system for training with the outputs
you'd want in a prediction setting.
You may also find the :func:`~pytorch_lightning.core.decorators.auto_move_data` decorator useful
when using the module outside Lightning in a production setting.
Args:
*args: Whatever you decide to pass into the forward method.
**kwargs: Keyword arguments are also possible.
Return:
Predicted output
Examples:
.. code-block:: python
# example if we were using this model as a feature extractor
def forward(self, x):
feature_maps = self.convnet(x)
return feature_maps
def training_step(self, batch, batch_idx):
x, y = batch
feature_maps = self(x)
logits = self.classifier(feature_maps)
# ...
return loss
                # splitting it this way allows the model to be used as a feature extractor
model = MyModelAbove()
inputs = server.get_request()
results = model(inputs)
server.write_results(results)
# -------------
# This is in stark contrast to torch.nn.Module where normally you would have this:
def forward(self, batch):
x, y = batch
feature_maps = self.convnet(x)
logits = self.classifier(feature_maps)
return logits
"""
return super().forward(*args, **kwargs)
def training_step(self, *args, **kwargs):
r"""
Here you compute and return the training loss and some additional metrics for e.g.
the progress bar or logger.
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): Integer displaying index of this batch
optimizer_idx (int): When using multiple optimizers, this argument will also be present.
hiddens(:class:`~torch.Tensor`): Passed in if
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.truncated_bptt_steps` > 0.
Return:
Any of.
- :class:`~torch.Tensor` - The loss tensor
- `dict` - A dictionary. Can include any keys, but must include the key 'loss'
- `None` - Training will skip to the next batch
In this step you'd normally do the forward pass and calculate the loss for a batch.
You can also do fancier things like multiple forward passes or something model specific.
Example::
def training_step(self, batch, batch_idx):
x, y, z = batch
out = self.encoder(x)
loss = self.loss(out, x)
return loss
If you define multiple optimizers, this step will be called with an additional
``optimizer_idx`` parameter.
.. code-block:: python
# Multiple optimizers (e.g.: GANs)
def training_step(self, batch, batch_idx, optimizer_idx):
if optimizer_idx == 0:
# do training_step with encoder
if optimizer_idx == 1:
# do training_step with decoder
If you add truncated back propagation through time you will also get an additional
argument with the hidden states of the previous step.
.. code-block:: python
# Truncated back-propagation through time
def training_step(self, batch, batch_idx, hiddens):
# hiddens are the hidden states from the previous truncated backprop step
...
out, hiddens = self.lstm(data, hiddens)
...
return {'loss': loss, 'hiddens': hiddens}
Note:
The loss value shown in the progress bar is smoothed (averaged) over the last values,
so it differs from the actual loss returned in train/validation step.
"""
rank_zero_warn(
"`training_step` must be implemented to be used with the Lightning Trainer"
)
def training_step_end(self, *args, **kwargs):
"""
Use this when training with dp or ddp2 because :meth:`training_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]
training_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in `training_step` for each batch part.
Return:
Anything
When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
                # softmax uses only a portion of the batch in the denominator
loss = self.softmax(out)
loss = nce_loss(loss)
return loss
If you wish to do something with all the parts of the batch, then use this method to do it:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return {'pred': out}
def training_step_end(self, training_step_outputs):
gpu_0_pred = training_step_outputs[0]['pred']
gpu_1_pred = training_step_outputs[1]['pred']
gpu_n_pred = training_step_outputs[n]['pred']
# this softmax now uses the full batch
loss = nce_loss([gpu_0_pred, gpu_1_pred, gpu_n_pred])
return loss
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def training_epoch_end(self, outputs: List[Any]) -> None:
"""
Called at the end of the training epoch with the outputs of all training steps.
Use this in case you need to do something with all the outputs for every training_step.
.. code-block:: python
# the pseudocode for these calls
train_outs = []
for train_batch in train_data:
out = training_step(train_batch)
train_outs.append(out)
training_epoch_end(train_outs)
Args:
outputs: List of outputs you defined in :meth:`training_step`, or if there are
multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If this method is not overridden, this won't be called.
Example::
def training_epoch_end(self, training_step_outputs):
# do something with all training_step outputs
return result
With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each training step for that dataloader.
.. code-block:: python
def training_epoch_end(self, training_step_outputs):
for out in training_step_outputs:
# do something here
"""
def validation_step(self, *args, **kwargs):
r"""
Operates on a single batch of data from the validation set.
        In this step you might generate examples or calculate anything of interest like accuracy.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple val dataloaders used)
Return:
Any of.
- Any object or value
- `None` - Validation will skip to the next batch
.. code-block:: python
# pseudocode of order
out = validation_step()
if defined('validation_step_end'):
out = validation_step_end(out)
out = validation_epoch_end(out)
.. code-block:: python
# if you have one val dataloader:
def validation_step(self, batch, batch_idx)
# if you have multiple val dataloaders:
def validation_step(self, batch, batch_idx, dataloader_idx)
Examples:
.. code-block:: python
# CASE 1: A single validation dataset
def validation_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'val_loss': loss, 'val_acc': val_acc})
If you pass in multiple val dataloaders, :meth:`validation_step` will have an additional argument.
.. code-block:: python
# CASE 2: multiple validation dataloaders
def validation_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to validate you don't need to implement this method.
Note:
When the :meth:`validation_step` is called, the model has been put in eval mode
and PyTorch gradients have been disabled. At the end of validation,
the model goes back to training mode and gradients are enabled.
"""
def validation_step_end(self, *args, **kwargs):
"""
Use this when validating with dp or ddp2 because :meth:`validation_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches]
validation_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`validation_step`
for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT validation_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
loss = self.softmax(out)
loss = nce_loss(loss)
self.log('val_loss', loss)
# --------------
# with validation_step_end to do softmax over the full batch
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
return out
def validation_step_end(self, val_step_outputs):
for out in val_step_outputs:
# do something with these
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def validation_epoch_end(self, outputs: List[Any]) -> None:
"""
Called at the end of the validation epoch with the outputs of all validation steps.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
outputs: List of outputs you defined in :meth:`validation_step`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If you didn't define a :meth:`validation_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def validation_epoch_end(self, val_step_outputs):
for out in val_step_outputs:
# do something
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each validation step for that dataloader.
.. code-block:: python
def validation_epoch_end(self, outputs):
for dataloader_output_result in outputs:
dataloader_outs = dataloader_output_result.dataloader_i_outputs
self.log('final_metric', final_value)
"""
def test_step(self, *args, **kwargs):
r"""
Operates on a single batch of data from the test set.
In this step you'd normally generate examples or calculate anything of interest
such as accuracy.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch.
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple test dataloaders used).
Return:
Any of.
- Any object or value
- `None` - Testing will skip to the next batch
.. code-block:: python
# if you have one test dataloader:
def test_step(self, batch, batch_idx)
# if you have multiple test dataloaders:
def test_step(self, batch, batch_idx, dataloader_idx)
Examples:
.. code-block:: python
# CASE 1: A single test dataset
def test_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'test_loss': loss, 'test_acc': test_acc})
If you pass in multiple test dataloaders, :meth:`test_step` will have an additional
argument.
.. code-block:: python
# CASE 2: multiple test dataloaders
def test_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to test you don't need to implement this method.
Note:
When the :meth:`test_step` is called, the model has been put in eval mode and
PyTorch gradients have been disabled. At the end of the test epoch, the model goes back
to training mode and gradients are enabled.
"""
def test_step_end(self, *args, **kwargs):
"""
Use this when testing with dp or ddp2 because :meth:`test_step` will operate
on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]
test_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`test_step` for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT test_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
loss = self.softmax(out)
self.log('test_loss', loss)
# --------------
# with test_step_end to do softmax over the full batch
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return out
def test_step_end(self, output_results):
# this out is now the full size of the batch
all_test_step_outs = output_results.out
loss = nce_loss(all_test_step_outs)
self.log('test_loss', loss)
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def test_epoch_end(
self, outputs: List[Any]
) -> None:
"""
Called at the end of a test epoch with the output of all test steps.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
outputs: List of outputs you defined in :meth:`test_step_end`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader
Return:
None
Note:
If you didn't define a :meth:`test_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def test_epoch_end(self, outputs):
# do something with the outputs of all test batches
                    all_test_preds = outputs.predictions
                    some_result = calc_all_results(all_test_preds)
                    self.log('some_result', some_result)
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each test step for that dataloader.
.. code-block:: python
def test_epoch_end(self, outputs):
final_value = 0
for dataloader_outputs in outputs:
for test_step_out in dataloader_outputs:
# do something
final_value += test_step_out
self.log('final_metric', final_value)
"""
def configure_optimizers(
self,
):
r"""
Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
Return:
Any of these 6 options.
- Single optimizer.
- List or Tuple - List of optimizers.
- Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).
- Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler'
key whose value is a single LR scheduler or lr_dict.
- Tuple of dictionaries as described, with an optional 'frequency' key.
- None - Fit will run without any optimizer.
Note:
The 'frequency' value is an int corresponding to the number of sequential batches
optimized with the specific optimizer. It should be given to none or to all of the optimizers.
There is a difference between passing multiple optimizers in a list,
and passing multiple optimizers in dictionaries with a frequency of 1:
In the former case, all optimizers will operate on the given batch in each optimization step.
In the latter, only one optimizer will operate on the given batch at every step.
The lr_dict is a dictionary which contains the scheduler and its associated configuration.
The default configuration is shown below.
.. code-block:: python
{
'scheduler': lr_scheduler, # The LR scheduler instance (required)
'interval': 'epoch', # The unit of the scheduler's step size
'frequency': 1, # The frequency of the scheduler
'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler
'monitor': 'val_loss', # Metric for ReduceLROnPlateau to monitor
'strict': True, # Whether to crash the training if `monitor` is not found
'name': None, # Custom name for LearningRateMonitor to use
}
Only the ``scheduler`` key is required, the rest will be set to the defaults above.
Examples:
.. code-block:: python
# most cases
def configure_optimizers(self):
opt = Adam(self.parameters(), lr=1e-3)
return opt
# multiple optimizer case (e.g.: GAN)
def configure_optimizers(self):
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
                    discriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
                    return generator_opt, discriminator_opt
                # example with learning rate schedulers
                def configure_optimizers(self):
                    generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
                    discriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
                    discriminator_sched = CosineAnnealing(discriminator_opt, T_max=10)
                    return [generator_opt, discriminator_opt], [discriminator_sched]
# example with step-based learning rate schedulers
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
gen_sched = {'scheduler': ExponentialLR(gen_opt, 0.99),
'interval': 'step'} # called after each training step
                    dis_sched = CosineAnnealing(dis_opt, T_max=10)  # called every epoch
return [gen_opt, dis_opt], [gen_sched, dis_sched]
# example with optimizer frequencies
# see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1
# https://arxiv.org/abs/1704.00028
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
n_critic = 5
return (
{'optimizer': dis_opt, 'frequency': n_critic},
{'optimizer': gen_opt, 'frequency': 1}
)
Note:
Some things to know:
- Lightning calls ``.backward()`` and ``.step()`` on each optimizer
and learning rate scheduler as needed.
- If you use 16-bit precision (``precision=16``), Lightning will automatically
handle the optimizers for you.
- If you use multiple optimizers, :meth:`training_step` will have an additional
``optimizer_idx`` parameter.
- If you use LBFGS Lightning handles the closure function automatically for you.
- If you use multiple optimizers, gradients will be calculated only
for the parameters of current optimizer at each training step.
- If you need to control how often those optimizers step or override the
default ``.step()`` schedule, override the :meth:`optimizer_step` hook.
- If you only want to call a learning rate scheduler every ``x`` step or epoch,
or want to monitor a custom metric, you can specify these in a lr_dict:
.. code-block:: python
{
'scheduler': lr_scheduler,
'interval': 'step', # or 'epoch'
'monitor': 'val_f1',
'frequency': x,
}
"""
rank_zero_warn(
"`configure_optimizers` must be implemented to be used with the Lightning Trainer"
)
def manual_backward(self, loss: Tensor, optimizer: Optimizer, *args, **kwargs) -> None:
"""
Call this directly from your training_step when doing optimizations manually.
By using this we can ensure that all the proper scaling when using 16-bit etc has been done for you
This function forwards all args to the .backward() call as well.
.. tip:: In manual mode we still automatically clip grads if Trainer(gradient_clip_val=x) is set
.. tip:: In manual mode we still automatically accumulate grad over batches if
Trainer(accumulate_grad_batches=x) is set and you use `optimizer.step()`
Example::
def training_step(...):
(opt_a, opt_b) = self.optimizers()
loss = ...
# automatically applies scaling, etc...
self.manual_backward(loss, opt_a)
opt_a.step()
"""
# make sure we're using manual opt
self._verify_is_manual_optimization('manual_backward')
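        # the flag below lets backward() know this call came from manual_backward, so the loss is backpropagated even though automatic optimization is disabled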
# backward
self._running_manual_backward = True
self.trainer.train_loop.backward(loss, optimizer, -1, *args, **kwargs)
self._running_manual_backward = False
def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:
"""
Override backward with your own implementation if you need to.
Args:
loss: Loss is already scaled by accumulated grads
optimizer: Current optimizer being used
optimizer_idx: Index of the current optimizer being used
Called to perform backward step.
Feel free to override as needed.
The loss passed in has already been scaled for accumulated gradients if requested.
Example::
def backward(self, loss, optimizer, optimizer_idx):
loss.backward()
"""
if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
loss.backward(*args, **kwargs)
def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
"""
Makes sure only the gradients of the current optimizer's parameters are calculated
in the training step to prevent dangling gradients in multiple-optimizer setup.
.. note:: Only called when using multiple optimizers
Override for your own behavior
Args:
optimizer:
optimizer_idx:
"""
for param in self.parameters():
param.requires_grad = False
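        # re-enable gradients only for the parameters that belong to the current optimizer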
for group in optimizer.param_groups:
for param in group['params']:
param.requires_grad = True
def optimizer_step(
self,
epoch: int = None,
batch_idx: int = None,
optimizer: Optimizer = None,
optimizer_idx: int = None,
optimizer_closure: Optional[Callable] = None,
on_tpu: bool = None,
using_native_amp: bool = None,
using_lbfgs: bool = None,
) -> None:
r"""
Override this method to adjust the default way the
:class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer.
By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example
once per optimizer.
        .. tip:: With ``Trainer(enable_pl_optimizer=True)``, you can use ``optimizer.step()`` directly and it will handle zero_grad, accumulated gradients, AMP, TPU and more automatically for you.
Warning:
If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter
to ``optimizer.step()`` function as shown in the examples. This ensures that
``train_step_and_backward_closure`` is called within
:meth:`~pytorch_lightning.trainer.training_loop.TrainLoop.run_training_batch`.
Args:
epoch: Current epoch
batch_idx: Index of current batch
optimizer: A PyTorch optimizer
optimizer_idx: If you used multiple optimizers this indexes into that list.
optimizer_closure: closure for all optimizers
on_tpu: true if TPU backward is required
using_native_amp: True if using native amp
using_lbfgs: True if the matching optimizer is lbfgs
Examples:
.. code-block:: python
# DEFAULT
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
optimizer.step(closure=optimizer_closure)
# Alternating schedule for optimizer steps (i.e.: GANs)
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# update generator opt every 2 steps
if optimizer_idx == 0:
if batch_idx % 2 == 0 :
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
# update discriminator opt every 4 steps
if optimizer_idx == 1:
if batch_idx % 4 == 0 :
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
# ...
# add as many optimizers as you want
Here's another example showing how to use this for more advanced things such as
learning rate warm-up:
.. code-block:: python
# learning rate warm-up
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# warm up lr
if self.trainer.global_step < 500:
lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)
for pg in optimizer.param_groups:
pg['lr'] = lr_scale * self.learning_rate
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
"""
optimizer.step(closure=optimizer_closure)
def optimizer_zero_grad(
self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int
):
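        # by default this simply calls optimizer.zero_grad(); override to change how gradients are zeroed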
optimizer.zero_grad()
def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list:
r"""
When using truncated backpropagation through time, each batch must be split along the
time dimension. Lightning handles this by default, but for custom behavior override
this function.
Args:
batch: Current batch
split_size: The size of the split
Return:
List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated
back propagation through time. The default implementation splits root level Tensors and
Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length.
Examples:
.. code-block:: python
def tbptt_split_batch(self, batch, split_size):
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t:t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t:t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
Note:
Called in the training loop after
:meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start`
if :paramref:`~pytorch_lightning.trainer.Trainer.truncated_bptt_steps` > 0.
Each returned batch split is passed separately to :meth:`training_step`.
"""
time_dims = [
len(x[0])
for x in batch
if isinstance(x, (torch.Tensor, collections.Sequence))
]
assert len(time_dims) >= 1, "Unable to determine batch time dimension"
assert all(
x == time_dims[0] for x in time_dims
), "Batch time dimension length is ambiguous"
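        # slice tensors along dim 1 (and sequence items along their first dimension) into chunks of split_size time steps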
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t: t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t: t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
def summarize(self, mode: Optional[str] = ModelSummary.MODE_DEFAULT) -> Optional[ModelSummary]:
model_summary = None
if mode in ModelSummary.MODES:
model_summary = ModelSummary(self, mode=mode)
log.info("\n" + str(model_summary))
elif mode is not None:
raise MisconfigurationException(
f"`mode` can be None, {', '.join(ModelSummary.MODES)}, got {mode}"
)
return model_summary
def freeze(self) -> None:
r"""
Freeze all params for inference.
Example:
.. code-block:: python
model = MyLightningModule(...)
model.freeze()
"""
for param in self.parameters():
param.requires_grad = False
self.eval()
def unfreeze(self) -> None:
"""
Unfreeze all parameters for training.
.. code-block:: python
model = MyLightningModule(...)
model.unfreeze()
"""
for param in self.parameters():
param.requires_grad = True
self.train()
def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:
r"""
Implement this to override the default items displayed in the progress bar.
By default it includes the average loss value, split index of BPTT (if used)
and the version of the experiment when using a logger.
.. code-block::
Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]
Here is an example how to override the defaults:
.. code-block:: python
def get_progress_bar_dict(self):
# don't show the version number
items = super().get_progress_bar_dict()
items.pop("v_num", None)
return items
Return:
Dictionary with the items to be displayed in the progress bar.
"""
# call .item() only once but store elements without graphs
running_train_loss = self.trainer.train_loop.running_loss.mean()
avg_training_loss = None
if running_train_loss is not None:
avg_training_loss = running_train_loss.cpu().item()
elif self.trainer.train_loop.automatic_optimization:
avg_training_loss = float('NaN')
tqdm_dict = {}
if avg_training_loss is not None:
tqdm_dict["loss"] = f"{avg_training_loss:.3g}"
if self.trainer.truncated_bptt_steps is not None:
tqdm_dict["split_idx"] = self.trainer.split_idx
if self.trainer.logger is not None and self.trainer.logger.version is not None:
version = self.trainer.logger.version
# show last 4 places of long version strings
version = version[-4:] if isinstance(version, str) else version
tqdm_dict["v_num"] = version
return tqdm_dict
def _verify_is_manual_optimization(self, fn_name):
if self.trainer.train_loop.automatic_optimization:
raise MisconfigurationException(
f'to use {fn_name}, please disable automatic optimization:'
' set model property `automatic_optimization` as False'
)
@classmethod
def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:
"""
Collect all module arguments in the current constructor and all child constructors.
The child constructors are all the ``__init__`` methods that reach the current class through
(chained) ``super().__init__()`` calls.
Args:
frame: instance frame
Returns:
self_arguments: arguments dictionary of the first instance
parents_arguments: arguments dictionary of the parent's instances
"""
if not frame:
frame = inspect.currentframe()
frame_args = collect_init_args(frame.f_back, [])
self_arguments = frame_args[-1]
# set hyper_parameters in child
self_arguments = self_arguments
parents_arguments = {}
# add all arguments from parents
for args in frame_args[:-1]:
parents_arguments.update(args)
return self_arguments, parents_arguments
def save_hyperparameters(self, *args, frame=None) -> None:
"""Save all model arguments.
Args:
args: single object of `dict`, `NameSpace` or `OmegaConf`
                or string names of arguments from the class `__init__`
>>> from collections import OrderedDict
>>> class ManuallyArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # manually assign arguments
... self.save_hyperparameters('arg1', 'arg3')
... def forward(self, *args, **kwargs):
... ...
>>> model = ManuallyArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg3": 3.14
>>> class AutomaticArgsModel(LightningModule):
... def __init__(self, arg1, arg2, arg3):
... super().__init__()
... # equivalent automatic
... self.save_hyperparameters()
... def forward(self, *args, **kwargs):
... ...
>>> model = AutomaticArgsModel(1, 'abc', 3.14)
>>> model.hparams
"arg1": 1
"arg2": abc
"arg3": 3.14
>>> class SingleArgModel(LightningModule):
... def __init__(self, params):
... super().__init__()
... # manually assign single argument
... self.save_hyperparameters(params)
... def forward(self, *args, **kwargs):
... ...
>>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))
>>> model.hparams
"p1": 1
"p2": abc
"p3": 3.14
"""
if not frame:
frame = inspect.currentframe().f_back
init_args = get_init_args(frame)
assert init_args, "failed to inspect the self init"
if not args:
# take all arguments
hp = init_args
self._hparams_name = "kwargs" if hp else None
else:
# take only listed arguments in `save_hparams`
isx_non_str = [i for i, arg in enumerate(args) if not isinstance(arg, str)]
if len(isx_non_str) == 1:
hp = args[isx_non_str[0]]
cand_names = [k for k, v in init_args.items() if v == hp]
self._hparams_name = cand_names[0] if cand_names else None
else:
hp = {arg: init_args[arg] for arg in args if isinstance(arg, str)}
self._hparams_name = "kwargs"
# `hparams` are expected here
if hp:
self._set_hparams(hp)
        # make a deep copy so that later runtime changes are not reflected
self._hparams_initial = copy.deepcopy(self._hparams)
def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None:
if isinstance(hp, Namespace):
hp = vars(hp)
if isinstance(hp, dict):
hp = AttributeDict(hp)
elif isinstance(hp, PRIMITIVE_TYPES):
raise ValueError(f"Primitives {PRIMITIVE_TYPES} are not allowed.")
elif not isinstance(hp, ALLOWED_CONFIG_TYPES):
raise ValueError(f"Unsupported config type of {type(hp)}.")
if isinstance(hp, dict) and isinstance(self.hparams, dict):
self.hparams.update(hp)
else:
self._hparams = hp
@torch.no_grad()
def to_onnx(
self,
file_path: Union[str, Path],
input_sample: Optional[Any] = None,
**kwargs,
):
"""
Saves the model in ONNX format
Args:
file_path: The path of the file the onnx model should be saved to.
input_sample: An input for tracing. Default: None (Use self.example_input_array)
**kwargs: Will be passed to torch.onnx.export function.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
>>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:
... model = SimpleModel()
... input_sample = torch.randn((1, 64))
... model.to_onnx(tmpfile.name, input_sample, export_params=True)
... os.path.isfile(tmpfile.name)
True
"""
mode = self.training
if input_sample is None:
if self.example_input_array is None:
raise ValueError(
"Could not export to ONNX since neither `input_sample` nor"
" `model.example_input_array` attribute is set."
)
input_sample = self.example_input_array
input_sample = self.transfer_batch_to_device(input_sample)
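        # run a forward pass in eval mode to obtain example outputs unless the caller provided them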
if "example_outputs" not in kwargs:
self.eval()
kwargs["example_outputs"] = self(input_sample)
torch.onnx.export(self, input_sample, file_path, **kwargs)
self.train(mode)
@torch.no_grad()
def to_torchscript(
self,
file_path: Optional[Union[str, Path]] = None,
method: Optional[str] = 'script',
example_inputs: Optional[Any] = None,
**kwargs,
) -> Union[ScriptModule, Dict[str, ScriptModule]]:
"""
By default compiles the whole model to a :class:`~torch.jit.ScriptModule`.
        If you want to use tracing, please provide the argument `method='trace'` and make sure that either the
example_inputs argument is provided, or the model has self.example_input_array set.
If you would like to customize the modules that are scripted you should override this method.
In case you want to return multiple modules, we recommend using a dictionary.
Args:
file_path: Path where to save the torchscript. Default: None (no file saved).
method: Whether to use TorchScript's script or trace method. Default: 'script'
example_inputs: An input to be used to do tracing when method is set to 'trace'.
Default: None (Use self.example_input_array)
**kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or
:func:`torch.jit.trace` function.
Note:
- Requires the implementation of the
:meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method.
- The exported script will be set to evaluation mode.
- It is recommended that you install the latest supported version of PyTorch
to use this feature without limitations. See also the :mod:`torch.jit`
documentation for supported features.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
...
>>> model = SimpleModel()
>>> torch.jit.save(model.to_torchscript(), "model.pt") # doctest: +SKIP
>>> os.path.isfile("model.pt") # doctest: +SKIP
>>> torch.jit.save(model.to_torchscript(file_path="model_trace.pt", method='trace', # doctest: +SKIP
... example_inputs=torch.randn(1, 64))) # doctest: +SKIP
>>> os.path.isfile("model_trace.pt") # doctest: +SKIP
True
Return:
This LightningModule as a torchscript, regardless of whether file_path is
defined or not.
"""
mode = self.training
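        # remember the current train/eval mode so it can be restored after scripting/tracing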
if method == 'script':
torchscript_module = torch.jit.script(self.eval(), **kwargs)
elif method == 'trace':
# if no example inputs are provided, try to see if model has example_input_array set
if example_inputs is None:
if self.example_input_array is None:
raise ValueError(
'Choosing method=`trace` requires either `example_inputs`'
' or `model.example_input_array` to be defined'
)
example_inputs = self.example_input_array
# automatically send example inputs to the right device and use trace
example_inputs = self.transfer_batch_to_device(example_inputs)
torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)
else:
raise ValueError("The 'method' parameter only supports 'script' or 'trace',"
f" but value given was: {method}")
self.train(mode)
if file_path is not None:
torch.jit.save(torchscript_module, file_path)
return torchscript_module
@property
def hparams(self) -> Union[AttributeDict, dict, Namespace]:
if not hasattr(self, "_hparams"):
self._hparams = AttributeDict()
return self._hparams
@property
def hparams_initial(self) -> AttributeDict:
if not hasattr(self, "_hparams_initial"):
return AttributeDict()
# prevent any change
return copy.deepcopy(self._hparams_initial)
@hparams.setter
def hparams(self, hp: Union[dict, Namespace, Any]):
# TODO: remove this method in v1.3.0.
rank_zero_warn(
"The setter for self.hparams in LightningModule is deprecated since v1.1.0 and will be"
" removed in v1.3.0. Replace the assignment `self.hparams = hparams` with "
" `self.save_hyperparameters()`.",
DeprecationWarning
)
hparams_assignment_name = self.__get_hparams_assignment_variable()
self._hparams_name = hparams_assignment_name
self._set_hparams(hp)
        # this resolves the case where the user does not use `save_hyperparameters` and does a hard assignment in __init__
if not hasattr(self, "_hparams_initial"):
self._hparams_initial = copy.deepcopy(self._hparams)
def __get_hparams_assignment_variable(self):
"""
        Looks at the source code of the class to figure out what the user named self.hparams.
        This only happens when the user explicitly sets self.hparams.
"""
try:
class_code = inspect.getsource(self.__class__)
lines = class_code.split("\n")
for line in lines:
line = re.sub(r"\s+", "", line, flags=re.UNICODE)
if ".hparams=" in line:
return line.split("=")[1]
except Exception:
return "hparams"
return None
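# Hedged illustration (not part of the original module): the deprecation warning emitted by the
# `hparams` setter above recommends `self.save_hyperparameters()` over assigning `self.hparams`
# directly. The class name `_HParamsExample` and its constructor arguments are hypothetical.
class _HParamsExample(LightningModule):
    def __init__(self, lr: float = 1e-3, hidden_dim: int = 64):
        super().__init__()
        # deprecated style: self.hparams = dict(lr=lr, hidden_dim=hidden_dim)
        self.save_hyperparameters()  # stores lr and hidden_dim in self.hparams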
| [
"torch._C._log_api_usage_once",
"torch.no_grad",
"torch.jit.save",
"torch.onnx.export"
] | 1.3 | tobiasmaier/pytorch-lightning | 7f352cb69a8202e3f829419657597697ca5d99e2 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
import torch
from torch import optim
from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.cluster_environments import ClusterEnvironment
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.distributed import LightningDistributed
from pytorch_lightning.overrides.data_parallel import LightningDataParallel
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.exceptions import MisconfigurationException
class DataParallelAccelerator(Accelerator):
def __init__(self, trainer, cluster_environment: Optional[ClusterEnvironment] = None):
"""
Runs training using DP via manual start (not HPC cluster)
Example::
# default
trainer = Trainer(accelerator=DataParallelAccelerator())
"""
super().__init__(trainer, cluster_environment)
self.model_autocast_original_forward = None
self.dist = LightningDistributed()
self.nickname = 'dp'
def setup(self, model):
# call setup after the ddp process has connected
self.trainer.call_setup_hook(model)
# put model on correct device
model.cuda(self.trainer.root_gpu)
# CHOOSE OPTIMIZER
# allow for lr schedulers as well
self.setup_optimizers(model)
# init torch data parallel
model = self.__init_torch_data_parallel(model)
# hack forward to do autocast for the user
self.model_autocast_original_forward = model.forward
# init half precision
if self.trainer.amp_backend:
model = self.__init_half_precision(model)
self.trainer.model = model
def __init_torch_data_parallel(self, model):
# create list of device ids
device_ids = self.trainer.data_parallel_device_ids
if isinstance(device_ids, int):
device_ids = list(range(device_ids))
# set dp device
torch.cuda.set_device(self.trainer.root_gpu)
model = LightningDataParallel(model, device_ids=device_ids)
return model
def __init_half_precision(self, model):
if self.trainer.amp_backend == AMPType.NATIVE:
self.__init_native_amp(model)
else:
model = self.__init_nvidia_apex(model)
return model
def __init_native_amp(self, model):
model.forward = torch.cuda.amp.autocast()(model.forward)
def __init_nvidia_apex(self, model):
        # check for this bug (amp + dp + !O1 doesn't work)
# https://github.com/NVIDIA/apex/issues/227
if self.trainer.amp_level == 'O2':
raise MisconfigurationException(
f'Amp level {self.trainer.amp_level} with DataParallel is not supported.'
f' See this note from NVIDIA for more info: https://github.com/NVIDIA/apex/issues/227.'
f' We recommend you switch to ddp if you want to use amp')
else:
model = self.trainer.precision_connector.connect(model)
return model
def teardown(self):
# replace the original fwd function
self.trainer.model.forward = self.model_autocast_original_forward
self.barrier()
def _step(self, args):
if self.trainer.amp_backend == AMPType.NATIVE:
with torch.cuda.amp.autocast():
output = self.trainer.model(*args)
else:
output = self.trainer.model(*args)
return output
def training_step(self, args):
return self._step(args)
def validation_step(self, args):
return self._step(args)
def test_step(self, args):
return self._step(args)
def training_step_end(self, output):
if isinstance(output, Result):
output.dp_reduce()
elif isinstance(output, torch.Tensor):
output = output.mean()
return output
def validation_step_end(self, output):
if isinstance(output, Result):
output.dp_reduce()
elif isinstance(output, torch.Tensor):
output = output.mean()
return output
def test_step_end(self, output):
if isinstance(output, Result):
output.dp_reduce()
elif isinstance(output, torch.Tensor):
output = output.mean()
return output
def get_reference_model(self, model) -> LightningModule:
if isinstance(model, LightningDataParallel):
return model.module
return model
@property
def require_distributed_sampler(self):
return False
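def _dp_reduce_example() -> torch.Tensor:
    """Hedged sketch (added for illustration, not used by the accelerator): under DP each replica returns
    its own loss, and the plain-tensor branch of ``training_step_end`` above reduces them with ``.mean()``.
    The values below are made up."""
    per_gpu_losses = torch.tensor([0.9, 1.1])  # pretend these came from two GPU replicas
    return per_gpu_losses.mean()  # single scalar passed on to the rest of the training loop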
| [
"torch.cuda.set_device",
"torch.cuda.amp.autocast"
] | 1.3 | tobiasmaier/pytorch-lightning | 7f352cb69a8202e3f829419657597697ca5d99e2 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from pathlib import Path
from typing import Optional, Union
import torch
import pytorch_lightning
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.utilities import AMPType, APEX_AVAILABLE, OMEGACONF_AVAILABLE, rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.cloud_io import atomic_save, get_filesystem
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.upgrade_checkpoint import KEYS_MAPPING as DEPRECATED_CHECKPOINT_KEYS
if APEX_AVAILABLE:
from apex import amp
if OMEGACONF_AVAILABLE:
from omegaconf import Container
class CheckpointConnector:
def __init__(self, trainer):
self.trainer = trainer
# used to validate checkpointing logic
self.has_trained = False
def restore_weights(self) -> None:
"""
Attempt to restore a checkpoint (e.g. weights) in this priority:
1. from HPC weights
2. from `resume_from_checkpoint` file
3. don't restore
"""
# clear cache before restore
if self.trainer.on_gpu:
torch.cuda.empty_cache()
# 1. Attempt to restore states from HPC checkpoint
dir_path_hpc = str(self.trainer.weights_save_path)
max_suffix = self.max_ckpt_in_folder(dir_path_hpc, "hpc_ckpt_")
if max_suffix is not None:
checkpoint_path = f'{dir_path_hpc}/hpc_ckpt_{max_suffix}.ckpt'
self.hpc_load(checkpoint_path, self.trainer.on_gpu)
rank_zero_info(f'restored hpc model from: {checkpoint_path}')
# 2. Attempt to restore states from `resume_from_checkpoint` file
elif self.trainer.resume_from_checkpoint is not None:
self.restore(self.trainer.resume_from_checkpoint, on_gpu=self.trainer.on_gpu)
# wait for all to catch up
self.trainer.accelerator_backend.barrier('TrainerIOMixin.restore_weights')
# clear cache after restore
if self.trainer.on_gpu:
torch.cuda.empty_cache()
def restore(self, checkpoint_path: str, on_gpu: bool) -> bool:
"""
Load model/training states from a 'PyTorch-Lightning checkpoint' file through file-read and state-restore.
All restored states are listed in return value description of `dump_checkpoint`.
"""
# Try to read the checkpoint file at `checkpoint_path`. If not exist, do not restore checkpoint.
fs = get_filesystem(checkpoint_path)
if not fs.exists(checkpoint_path):
rank_zero_warn("No checkpoint file exists at `resume_from_checkpoint`. Start from scratch")
return False
# read a checkpoint dictionary object from the 'PyTorch-Lightning checkpoint' file at `checkpoint_path`
checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)
# acquire the model
model = self.trainer.get_model()
# restore model and datamodule state
self.restore_model_state(model, checkpoint)
if on_gpu:
model.cuda(self.trainer.root_gpu)
# restore training state
self.restore_training_state(checkpoint)
rank_zero_info(f"Restored states from the checkpoint file at {checkpoint_path}")
return True
def restore_model_state(self, model: LightningModule, checkpoint) -> None:
"""
Restore model states from a 'PyTorch-Lightning checkpoint' dictionary object
"""
# restore datamodule states
if self.trainer.datamodule is not None:
self.trainer.datamodule.on_load_checkpoint(checkpoint)
# hook: give user access to checkpoint if needed.
model.on_load_checkpoint(checkpoint)
# restore model state_dict
model.load_state_dict(checkpoint['state_dict'])
def restore_training_state(self, checkpoint):
"""
Restore trainer state.
        Model will get its chance to update
:param checkpoint:
:return:
"""
# validation
if 'optimizer_states' not in checkpoint or 'lr_schedulers' not in checkpoint:
raise KeyError(
'Trying to restore training state but checkpoint contains only the model.'
' This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`.'
)
if any([key in checkpoint for key in DEPRECATED_CHECKPOINT_KEYS]):
raise ValueError(
"The checkpoint you're attempting to load follows an"
" outdated schema. You can upgrade to the current schema by running"
" `python -m pytorch_lightning.utilities.upgrade_checkpoint --file model.ckpt`"
" where `model.ckpt` is your checkpoint file."
)
# restore amp scaling
if self.trainer.amp_backend == AMPType.NATIVE and 'native_amp_scaling_state' in checkpoint:
self.trainer.scaler.load_state_dict(checkpoint['native_amp_scaling_state'])
elif self.trainer.amp_backend == AMPType.APEX and 'amp_scaling_state' in checkpoint:
amp.load_state_dict(checkpoint['amp_scaling_state'])
# restore callback states
self.trainer.on_load_checkpoint(checkpoint)
self.trainer.global_step = checkpoint['global_step']
self.trainer.current_epoch = checkpoint['epoch']
        # crash if max_epochs is lower than the current epoch from the checkpoint
if self.trainer.current_epoch > self.trainer.max_epochs:
m = f"""
you restored a checkpoint with current_epoch={self.trainer.current_epoch}
but the Trainer(max_epochs={self.trainer.max_epochs})
"""
raise MisconfigurationException(m)
# Division deals with global step stepping once per accumulated batch
# Inequality deals with different global step for odd vs even num_training_batches
n_accum = 1 if self.trainer.accumulate_grad_batches is None else self.trainer.accumulate_grad_batches
expected_steps = self.trainer.num_training_batches / n_accum
if self.trainer.num_training_batches != 0 and self.trainer.global_step % expected_steps > 1:
rank_zero_warn(
"You're resuming from a checkpoint that ended mid-epoch."
" Training will start from the beginning of the next epoch."
" This can cause unreliable results if further training is done,"
" consider using an end of epoch checkpoint."
)
# restore the optimizers
optimizer_states = checkpoint['optimizer_states']
for optimizer, opt_state in zip(self.trainer.optimizers, optimizer_states):
optimizer.load_state_dict(opt_state)
# move optimizer to GPU 1 weight at a time
# avoids OOM
if self.trainer.root_gpu is not None:
for state in optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.cuda(self.trainer.root_gpu)
# restore the lr schedulers
lr_schedulers = checkpoint['lr_schedulers']
for scheduler, lrs_state in zip(self.trainer.lr_schedulers, lr_schedulers):
scheduler['scheduler'].load_state_dict(lrs_state)
# ----------------------------------
# PRIVATE OPS
# ----------------------------------
def hpc_save(self, folderpath: str, logger):
# make sure the checkpoint folder exists
folderpath = str(folderpath) # because the tests pass a path object
fs = get_filesystem(folderpath)
fs.makedirs(folderpath, exist_ok=True)
# save logger to make sure we get all the metrics
logger.save()
max_suffix = self.max_ckpt_in_folder(folderpath)
ckpt_number = (max_suffix if max_suffix is not None else 0) + 1
fs.makedirs(folderpath, exist_ok=True)
filepath = os.path.join(folderpath, f'hpc_ckpt_{ckpt_number}.ckpt')
# give model a chance to do something on hpc_save
model = self.trainer.get_model()
checkpoint = self.dump_checkpoint()
model.on_hpc_save(checkpoint)
if self.trainer.accelerator_backend:
checkpoint = self.trainer.accelerator_backend.on_save(checkpoint)
# do the actual save
# TODO: fix for anything with multiprocess DP, DDP, DDP2
try:
atomic_save(checkpoint, filepath)
except AttributeError as err:
if LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in checkpoint:
del checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
rank_zero_warn(
'warning, `hyper_parameters` dropped from checkpoint.' f' An attribute is not picklable {err}'
)
atomic_save(checkpoint, filepath)
return filepath
def dump_checkpoint(self, weights_only: bool = False) -> dict:
"""Creating a model checkpoint dictionary object from various component states.
Args:
weights_only: saving model weights only
Return:
structured dictionary: {
'epoch': training epoch
'global_step': training global step
'pytorch-lightning_version': PyTorch Lightning's version
'callbacks': "callback specific state"[] # if not weights_only
'optimizer_states': "PT optim's state_dict"[] # if not weights_only
'lr_schedulers': "PT sched's state_dict"[] # if not weights_only
'native_amp_scaling_state': PT amp's state_dict # if not weights_only and use native amp
'amp_scaling_state': Apex's state_dict # if not weights_only and use apex amp
'state_dict': Model's state_dict (e.g. network weights)
CHECKPOINT_HYPER_PARAMS_NAME:
CHECKPOINT_HYPER_PARAMS_KEY:
CHECKPOINT_HYPER_PARAMS_TYPE:
something_cool_i_want_to_save: anything you define through model.on_save_checkpoint
LightningDataModule.__class__.__name__: pl DataModule's state
}
"""
# dump epoch/global_step/pytorch-lightning_version
current_epoch = self.trainer.current_epoch
global_step = self.trainer.global_step
has_reached_max_steps = self.trainer.max_steps and self.trainer.max_steps <= global_step
global_step += 1
if not has_reached_max_steps:
current_epoch += 1
checkpoint = {
'epoch': current_epoch,
'global_step': global_step,
'pytorch-lightning_version': pytorch_lightning.__version__,
}
if not weights_only:
# dump callbacks
callback_states = self.trainer.on_save_checkpoint()
checkpoint['callbacks'] = callback_states
optimizer_states = []
for i, optimizer in enumerate(self.trainer.optimizers):
# Rely on accelerator to dump optimizer state
optimizer_state = self.trainer.accelerator_backend.optimizer_state(optimizer)
optimizer_states.append(optimizer_state)
checkpoint['optimizer_states'] = optimizer_states
# dump lr schedulers
lr_schedulers = []
for scheduler in self.trainer.lr_schedulers:
lr_schedulers.append(scheduler['scheduler'].state_dict())
checkpoint['lr_schedulers'] = lr_schedulers
# dump amp scaling
if self.trainer.amp_backend == AMPType.NATIVE and not self.trainer.use_tpu and self.trainer.scaler is not None:
checkpoint['native_amp_scaling_state'] = self.trainer.scaler.state_dict()
elif self.trainer.amp_backend == AMPType.APEX:
checkpoint['amp_scaling_state'] = amp.state_dict()
# add the hyper_parameters and state_dict from the model
model = self.trainer.get_model()
# dump the module_arguments and state_dict from the model
checkpoint['state_dict'] = model.state_dict()
if model.hparams:
if hasattr(model, '_hparams_name'):
checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_NAME] = model._hparams_name
# dump arguments
if OMEGACONF_AVAILABLE and isinstance(model.hparams, Container):
checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY] = model.hparams
checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_TYPE] = type(model.hparams)
else:
checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY] = dict(model.hparams)
# give the model a chance to dump a few things
model.on_save_checkpoint(checkpoint)
if self.trainer.datamodule is not None:
self.trainer.datamodule.on_save_checkpoint(checkpoint)
return checkpoint
def hpc_load(self, checkpoint_path: str, on_gpu: bool):
"""
Load model/training states from a 'PyTorch-Lightning checkpoint' file for hpc.
All restored states are listed in return value description of `dump_checkpoint`.
"""
# read a checkpoint dictionary object from the 'PyTorch-Lightning checkpoint' file at `checkpoint_path`
checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)
# acquire the model
model = self.trainer.get_model()
# restore model and datamodule state
self.restore_model_state(model, checkpoint)
if self.trainer.root_gpu is not None:
model.cuda(self.trainer.root_gpu)
# restore training state
self.restore_training_state(checkpoint)
# call hpc specific hook
model.on_hpc_load(checkpoint)
def max_ckpt_in_folder(self, dir_path: Union[str, Path], name_key: str = 'ckpt_') -> Optional[int]:
"""List up files in `dir_path` with name_key, then yield maximum suffix number.
Args:
dir_path: path of directory which may contain files whose name include `name_key`
Returns:
None if no-corresponding-file else maximum suffix number
"""
# check directory existence
fs = get_filesystem(dir_path)
if not fs.exists(dir_path):
return None
# check corresponding file existence
files = [os.path.basename(f["name"]) for f in fs.listdir(dir_path)]
files = [x for x in files if name_key in x]
if len(files) == 0:
return None
# extract suffix number
ckpt_vs = []
for name in files:
name = name.split(name_key)[-1]
name = re.sub('[^0-9]', '', name)
ckpt_vs.append(int(name))
return max(ckpt_vs)
def get_max_ckpt_path_from_folder(self, folder_path: Union[str, Path]) -> str:
"""Get path of maximum-epoch checkpoint in the folder."""
max_suffix = self.max_ckpt_in_folder(folder_path)
ckpt_number = max_suffix if max_suffix is not None else 0
return f'{folder_path}/hpc_ckpt_{ckpt_number}.ckpt'
def save_checkpoint(self, filepath, weights_only: bool = False):
"""Save model/training states as a checkpoint file through state-dump and file-write.
Args:
filepath: write-target file's path
weights_only: saving model weights only
"""
# dump states as a checkpoint dictionary object
checkpoint = self.dump_checkpoint(weights_only)
if self.trainer.is_global_zero:
# write the checkpoint dictionary on the file
if self.trainer.accelerator_backend:
checkpoint = self.trainer.accelerator_backend.on_save(checkpoint)
try:
atomic_save(checkpoint, filepath)
except AttributeError as err:
if LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in checkpoint:
del checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
rank_zero_warn(
'Warning, `hyper_parameters` dropped from checkpoint.' f' An attribute is not picklable {err}'
)
atomic_save(checkpoint, filepath)
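def _inspect_checkpoint_example(ckpt_path: str) -> None:
    """Hedged sketch (added for illustration, not used by the connector): load a saved Lightning
    checkpoint and print its top-level keys, which `dump_checkpoint` above documents as including
    'epoch', 'global_step', 'pytorch-lightning_version', 'state_dict' and, unless saved with
    ``weights_only``, 'callbacks', 'optimizer_states' and 'lr_schedulers'. ``ckpt_path`` is a
    hypothetical path supplied by the caller."""
    checkpoint = torch.load(ckpt_path, map_location="cpu")
    print(sorted(checkpoint.keys()))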
| [
"torch.cuda.empty_cache"
] | 1.3 | tobiasmaier/pytorch-lightning | 7f352cb69a8202e3f829419657597697ca5d99e2 |
1.1 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import egg.core as core
from egg.core import Callback, Interaction, PrintValidationEvents
from egg.zoo.basic_games.architectures import DiscriReceiver, RecoReceiver, Sender
from egg.zoo.basic_games.data_readers import AttValDiscriDataset, AttValRecoDataset
# the following section specifies parameters that are specific to our games: we will also inherit the
# standard EGG parameters from https://github.com/facebookresearch/EGG/blob/master/egg/core/util.py
def get_params(params):
parser = argparse.ArgumentParser()
# arguments controlling the game type
parser.add_argument(
"--game_type",
type=str,
default="reco",
help="Selects whether to play a reco(nstruction) or discri(mination) game (default: reco)",
)
# arguments concerning the input data and how they are processed
parser.add_argument(
"--train_data", type=str, default=None, help="Path to the train data"
)
parser.add_argument(
"--validation_data", type=str, default=None, help="Path to the validation data"
)
# (the following is only used in the reco game)
parser.add_argument(
"--n_attributes",
type=int,
default=None,
help="Number of attributes in Sender input (must match data set, and it is only used in reco game)",
)
parser.add_argument(
"--n_values",
type=int,
default=None,
help="Number of values for each attribute (must match data set)",
)
parser.add_argument(
"--validation_batch_size",
type=int,
default=0,
help="Batch size when processing validation data, whereas training data batch_size is controlled by batch_size (default: same as training data batch size)",
)
# arguments concerning the training method
parser.add_argument(
"--mode",
type=str,
default="rf",
help="Selects whether Reinforce or Gumbel-Softmax relaxation is used for training {rf, gs} (default: rf)",
)
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="GS temperature for the sender, only relevant in Gumbel-Softmax (gs) mode (default: 1.0)",
)
parser.add_argument(
"--sender_entropy_coeff",
type=float,
default=1e-1,
help="Reinforce entropy regularization coefficient for Sender, only relevant in Reinforce (rf) mode (default: 1e-1)",
)
# arguments concerning the agent architectures
parser.add_argument(
"--sender_cell",
type=str,
default="rnn",
help="Type of the cell used for Sender {rnn, gru, lstm} (default: rnn)",
)
parser.add_argument(
"--receiver_cell",
type=str,
default="rnn",
help="Type of the cell used for Receiver {rnn, gru, lstm} (default: rnn)",
)
parser.add_argument(
"--sender_hidden",
type=int,
default=10,
help="Size of the hidden layer of Sender (default: 10)",
)
parser.add_argument(
"--receiver_hidden",
type=int,
default=10,
help="Size of the hidden layer of Receiver (default: 10)",
)
parser.add_argument(
"--sender_embedding",
type=int,
default=10,
help="Output dimensionality of the layer that embeds symbols produced at previous step in Sender (default: 10)",
)
parser.add_argument(
"--receiver_embedding",
type=int,
default=10,
help="Output dimensionality of the layer that embeds the message symbols for Receiver (default: 10)",
)
# arguments controlling the script output
parser.add_argument(
"--print_validation_events",
default=False,
action="store_true",
help="If this flag is passed, at the end of training the script prints the input validation data, the corresponding messages produced by the Sender, and the output probabilities produced by the Receiver (default: do not print)",
)
args = core.init(parser, params)
return args
def main(params):
opts = get_params(params)
if opts.validation_batch_size == 0:
opts.validation_batch_size = opts.batch_size
print(opts, flush=True)
# the following if statement controls aspects specific to the two game tasks: loss, input data and architecture of the Receiver
# (the Sender is identical in both cases, mapping a single input attribute-value vector to a variable-length message)
if opts.game_type == "discri":
# the game object we will encounter below takes as one of its mandatory arguments a loss: a loss in EGG is expected to take as arguments the sender input,
# the message, the Receiver input, the Receiver output and the labels (although some of these elements might not actually be used by a particular loss);
# together with the actual loss computation, the loss function can return a dictionary with other auxiliary statistics: in this case, accuracy
def loss(
_sender_input,
_message,
_receiver_input,
receiver_output,
labels,
_aux_input,
):
# in the discriminative case, accuracy is computed by comparing the index with highest score in Receiver output (a distribution of unnormalized
            # probabilities over target positions) and the corresponding label read from input, indicating the ground-truth position of the target
acc = (receiver_output.argmax(dim=1) == labels).detach().float()
# similarly, the loss computes cross-entropy between the Receiver-produced target-position probability distribution and the labels
loss = F.cross_entropy(receiver_output, labels, reduction="none")
return loss, {"acc": acc}
        # the input data are read into DataLoader objects, which are pytorch constructs implementing standard data processing functionalities, such as shuffling
# and batching
# within our games, we implement dataset classes, such as AttValDiscriDataset, to read the input text files and convert the information they contain
# into the form required by DataLoader
        # look at the definition of the AttValDiscriDataset (the class to read discrimination game data) in data_readers.py for further details
# note that, for the training dataset, we first instantiate the AttValDiscriDataset object and then feed it to DataLoader, whereas for the
# validation data (confusingly called "test" data due to code heritage inertia) we directly declare the AttValDiscriDataset when instantiating
# DataLoader: the reason for this difference is that we need the train_ds object to retrieve the number of features of the input vectors
train_ds = AttValDiscriDataset(path=opts.train_data, n_values=opts.n_values)
train_loader = DataLoader(
train_ds, batch_size=opts.batch_size, shuffle=True, num_workers=1
)
test_loader = DataLoader(
AttValDiscriDataset(path=opts.validation_data, n_values=opts.n_values),
batch_size=opts.validation_batch_size,
shuffle=False,
num_workers=1,
)
# note that the number of features retrieved here concerns inputs after they are converted to 1-hot vectors
n_features = train_ds.get_n_features()
# we define here the core of the Receiver for the discriminative game, see the architectures.py file for details:
# note that this will be embedded in a wrapper below to define the full agent
receiver = DiscriReceiver(n_features=n_features, n_hidden=opts.receiver_hidden)
else: # reco game
def loss(
sender_input, _message, _receiver_input, receiver_output, labels, _aux_input
):
# in the case of the recognition game, for each attribute we compute a different cross-entropy score
# based on comparing the probability distribution produced by the Receiver over the values of each attribute
# with the corresponding ground truth, and then averaging across attributes
# accuracy is instead computed by considering as a hit only cases where, for each attribute, the Receiver
# assigned the largest probability to the correct value
# most of this function consists of the usual pytorch madness needed to reshape tensors in order to perform these computations
n_attributes = opts.n_attributes
n_values = opts.n_values
batch_size = sender_input.size(0)
receiver_output = receiver_output.view(batch_size * n_attributes, n_values)
receiver_guesses = receiver_output.argmax(dim=1)
correct_samples = (
(receiver_guesses == labels.view(-1))
.view(batch_size, n_attributes)
.detach()
)
acc = (torch.sum(correct_samples, dim=-1) == n_attributes).float()
labels = labels.view(batch_size * n_attributes)
loss = F.cross_entropy(receiver_output, labels, reduction="none")
loss = loss.view(batch_size, -1).mean(dim=1)
return loss, {"acc": acc}
# again, see data_readers.py in this directory for the AttValRecoDataset data reading class
train_loader = DataLoader(
AttValRecoDataset(
path=opts.train_data,
n_attributes=opts.n_attributes,
n_values=opts.n_values,
),
batch_size=opts.batch_size,
shuffle=True,
num_workers=1,
)
test_loader = DataLoader(
AttValRecoDataset(
path=opts.validation_data,
n_attributes=opts.n_attributes,
n_values=opts.n_values,
),
batch_size=opts.validation_batch_size,
shuffle=False,
num_workers=1,
)
# the number of features for the Receiver (input) and the Sender (output) is given by n_attributes*n_values because
# they are fed/produce 1-hot representations of the input vectors
n_features = opts.n_attributes * opts.n_values
# we define here the core of the receiver for the discriminative game, see the architectures.py file for details
# this will be embedded in a wrapper below to define the full architecture
receiver = RecoReceiver(n_features=n_features, n_hidden=opts.receiver_hidden)
# we are now outside the block that defined game-type-specific aspects of the games: note that the core Sender architecture
# (see architectures.py for details) is shared by the two games (it maps an input vector to a hidden layer that will be use to initialize
# the message-producing RNN): this will also be embedded in a wrapper below to define the full architecture
sender = Sender(n_hidden=opts.sender_hidden, n_features=n_features)
# now, we instantiate the full sender and receiver architectures, and connect them and the loss into a game object
# the implementation differs slightly depending on whether communication is optimized via Gumbel-Softmax ('gs') or Reinforce ('rf', default)
if opts.mode.lower() == "gs":
# in the following lines, we embed the Sender and Receiver architectures into standard EGG wrappers that are appropriate for Gumbel-Softmax optimization
# the Sender wrapper takes the hidden layer produced by the core agent architecture we defined above when processing input, and uses it to initialize
# the RNN that generates the message
sender = core.RnnSenderGS(
sender,
vocab_size=opts.vocab_size,
embed_dim=opts.sender_embedding,
hidden_size=opts.sender_hidden,
cell=opts.sender_cell,
max_len=opts.max_len,
temperature=opts.temperature,
)
# the Receiver wrapper takes the symbol produced by the Sender at each step (more precisely, in Gumbel-Softmax mode, a function of the overall probability
        # of non-eos symbols up to the step is used), maps it to a hidden layer through an RNN, and feeds this hidden layer to the
# core Receiver architecture we defined above (possibly with other Receiver input, as determined by the core architecture) to generate the output
receiver = core.RnnReceiverGS(
receiver,
vocab_size=opts.vocab_size,
embed_dim=opts.receiver_embedding,
hidden_size=opts.receiver_hidden,
cell=opts.receiver_cell,
)
game = core.SenderReceiverRnnGS(sender, receiver, loss)
# callback functions can be passed to the trainer object (see below) to operate at certain steps of training and validation
# for example, the TemperatureUpdater (defined in callbacks.py in the core directory) will update the Gumbel-Softmax temperature hyperparameter
# after each epoch
callbacks = [core.TemperatureUpdater(agent=sender, decay=0.9, minimum=0.1)]
else: # NB: any other string than gs will lead to rf training!
# here, the interesting thing to note is that we use the same core architectures we defined above, but now we embed them in wrappers that are suited to
        # Reinforce-based optimization
sender = core.RnnSenderReinforce(
sender,
vocab_size=opts.vocab_size,
embed_dim=opts.sender_embedding,
hidden_size=opts.sender_hidden,
cell=opts.sender_cell,
max_len=opts.max_len,
)
receiver = core.RnnReceiverDeterministic(
receiver,
vocab_size=opts.vocab_size,
embed_dim=opts.receiver_embedding,
hidden_size=opts.receiver_hidden,
cell=opts.receiver_cell,
)
game = core.SenderReceiverRnnReinforce(
sender,
receiver,
loss,
sender_entropy_coeff=opts.sender_entropy_coeff,
receiver_entropy_coeff=0,
)
callbacks = []
# we are almost ready to train: we define here an optimizer calling standard pytorch functionality
optimizer = core.build_optimizer(game.parameters())
# in the following statement, we finally instantiate the trainer object with all the components we defined (the game, the optimizer, the data
# and the callbacks)
    if opts.print_validation_events:
# we add a callback that will print loss and accuracy after each training and validation pass (see ConsoleLogger in callbacks.py in core directory)
# if requested by the user, we will also print a detailed log of the validation pass after full training: look at PrintValidationEvents in
# language_analysis.py (core directory)
trainer = core.Trainer(
game=game,
optimizer=optimizer,
train_data=train_loader,
validation_data=test_loader,
callbacks=callbacks
+ [
core.ConsoleLogger(print_train_loss=True, as_json=True),
core.PrintValidationEvents(n_epochs=opts.n_epochs),
],
)
else:
trainer = core.Trainer(
game=game,
optimizer=optimizer,
train_data=train_loader,
validation_data=test_loader,
callbacks=callbacks
+ [core.ConsoleLogger(print_train_loss=True, as_json=True)],
)
# and finally we train!
trainer.train(n_epochs=opts.n_epochs)
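def _example_discri_loss_signature():
    """Hedged sketch (added for illustration, never called by the script): EGG losses take
    (sender_input, message, receiver_input, receiver_output, labels, aux_input) and return a per-sample
    loss plus a dict of auxiliary statistics, mirroring the discriminative loss defined inside main()
    above. The shapes below (batch of 4, 3 candidate positions) are made up."""
    receiver_output = torch.randn(4, 3)  # unnormalized scores over candidate target positions
    labels = torch.tensor([0, 2, 1, 2])  # ground-truth target positions
    acc = (receiver_output.argmax(dim=1) == labels).detach().float()
    loss = F.cross_entropy(receiver_output, labels, reduction="none")
    return loss, {"acc": acc}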
if __name__ == "__main__":
import sys
main(sys.argv[1:])
| [
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.sum"
] | 1.1.0 | schlevik/EGG | 428d5aed3eb6fb0296f6856fb77b0a1cdceb33f1 |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Modules to compute the matching cost and solve the corresponding LSAP.
"""
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
from .box_ops import box_cxcywh_to_xyxy, generalized_box_iou
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4]
# Also concat the target labels and boxes
tgt_ids = torch.cat([v["labels"] for v in targets])
tgt_bbox = torch.cat([v["boxes"] for v in targets])
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, it can be omitted.
cost_class = -out_prob[:, tgt_ids]
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)
        # Compute the giou cost between boxes
cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.view(bs, num_queries, -1).cpu()
sizes = [len(v["boxes"]) for v in targets]
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
def build_matcher(args):
return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)
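def _matcher_usage_example():
    """Hedged usage sketch (added for illustration, not part of the original module): run the matcher on
    random predictions and toy targets. The shapes (batch of 2, 5 queries, 4 classes) and the cost
    weights are illustrative only."""
    matcher = HungarianMatcher(cost_class=1, cost_bbox=5, cost_giou=2)
    outputs = {
        "pred_logits": torch.randn(2, 5, 4),  # [batch_size, num_queries, num_classes]
        "pred_boxes": torch.rand(2, 5, 4),  # (cx, cy, w, h), normalized to [0, 1]
    }
    targets = [
        {"labels": torch.tensor([1, 2]), "boxes": torch.rand(2, 4)},
        {"labels": torch.tensor([0]), "boxes": torch.rand(1, 4)},
    ]
    # one (prediction_indices, target_indices) pair per batch element
    return matcher(outputs, targets)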
| [
"torch.cat",
"torch.no_grad",
"torch.as_tensor",
"torch.cdist"
] | 1.6.0 | yihui8776/TensorRT-DETR | 1f32e9a2f98e26ec5b2376f9a2695193887430fb |
1.4 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import tempfile
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.transforms import DataStatsd
TEST_CASE_1 = [
{
"keys": "img",
"prefix": "test data",
"data_shape": False,
"value_range": False,
"data_value": False,
"additional_info": None,
},
{"img": np.array([[0, 1], [1, 2]])},
"test data statistics:",
]
TEST_CASE_2 = [
{
"keys": "img",
"prefix": "test data",
"data_shape": True,
"value_range": False,
"data_value": False,
"additional_info": None,
},
{"img": np.array([[0, 1], [1, 2]])},
"test data statistics:\nShape: (2, 2)",
]
TEST_CASE_3 = [
{
"keys": "img",
"prefix": "test data",
"data_shape": True,
"value_range": True,
"data_value": False,
"additional_info": None,
},
{"img": np.array([[0, 1], [1, 2]])},
"test data statistics:\nShape: (2, 2)\nValue range: (0, 2)",
]
TEST_CASE_4 = [
{
"keys": "img",
"prefix": "test data",
"data_shape": True,
"value_range": True,
"data_value": True,
"additional_info": None,
},
{"img": np.array([[0, 1], [1, 2]])},
"test data statistics:\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]",
]
TEST_CASE_5 = [
{
"keys": "img",
"prefix": "test data",
"data_shape": True,
"value_range": True,
"data_value": True,
"additional_info": lambda x: np.mean(x),
},
{"img": np.array([[0, 1], [1, 2]])},
"test data statistics:\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]\nAdditional info: 1.0",
]
TEST_CASE_6 = [
{
"keys": "img",
"prefix": "test data",
"data_shape": True,
"value_range": True,
"data_value": True,
"additional_info": lambda x: torch.mean(x.float()),
},
{"img": torch.tensor([[0, 1], [1, 2]])},
(
"test data statistics:\nShape: torch.Size([2, 2])\nValue range: (0, 2)\n"
"Value: tensor([[0, 1],\n [1, 2]])\nAdditional info: 1.0"
),
]
TEST_CASE_7 = [
{
"keys": ("img", "affine"),
"prefix": ("image", "affine"),
"data_shape": True,
"value_range": (True, False),
"data_value": (False, True),
"additional_info": (lambda x: np.mean(x), None),
},
{"img": np.array([[0, 1], [1, 2]]), "affine": np.eye(2, 2)},
"affine statistics:\nShape: (2, 2)\nValue: [[1. 0.]\n [0. 1.]]",
]
TEST_CASE_8 = [
{"img": np.array([[0, 1], [1, 2]])},
"test data statistics:\nShape: (2, 2)\nValue range: (0, 2)\nValue: [[0 1]\n [1 2]]\nAdditional info: 1.0\n",
]
class TestDataStatsd(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7])
def test_value(self, input_param, input_data, expected_print):
transform = DataStatsd(**input_param)
_ = transform(input_data)
self.assertEqual(transform.printer.output, expected_print)
@parameterized.expand([TEST_CASE_8])
def test_file(self, input_data, expected_print):
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, "test_stats.log")
handler = logging.FileHandler(filename, mode="w")
input_param = {
"keys": "img",
"prefix": "test data",
"data_shape": True,
"value_range": True,
"data_value": True,
"additional_info": lambda x: np.mean(x),
"logger_handler": handler,
}
transform = DataStatsd(**input_param)
_ = transform(input_data)
handler.stream.close()
transform.printer._logger.removeHandler(handler)
with open(filename, "r") as f:
content = f.read()
self.assertEqual(content, expected_print)
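def _datastatsd_usage_example():
    """Hedged usage sketch (added for illustration, not part of the test suite): apply ``DataStatsd`` to a
    small dictionary, mirroring the parameter combinations exercised by the test cases above."""
    transform = DataStatsd(keys="img", prefix="test data", data_shape=True, value_range=True)
    _ = transform({"img": np.array([[0, 1], [1, 2]])})
    return transform.printer.output  # e.g. "test data statistics:\nShape: (2, 2)\nValue range: (0, 2)"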
if __name__ == "__main__":
unittest.main()
| [
"torch.tensor"
] | 1.4 | Irme/MONAI | 49e693c4e7df83dc1f8ab87349373de9263188a9 |
1.8 | import contextlib
import json
import logging
import os
from typing import Any, Dict, Optional
from unittest import mock
import pytest
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from torchmetrics import Accuracy
from pytorch_lightning import LightningDataModule, LightningModule, seed_everything, Trainer
from pytorch_lightning.callbacks import Callback, LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.plugins import DeepSpeedPrecisionPlugin
from pytorch_lightning.strategies import DeepSpeedStrategy
from pytorch_lightning.strategies.deepspeed import LightningDeepSpeedModule
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _DEEPSPEED_AVAILABLE
from pytorch_lightning.utilities.meta import init_meta_context
from tests.helpers.boring_model import BoringModel, RandomDataset, RandomIterableDataset
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
if _DEEPSPEED_AVAILABLE:
import deepspeed
from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
class ModelParallelBoringModel(BoringModel):
def __init__(self):
super().__init__()
self.layer = None
def configure_sharded_model(self) -> None:
self.layer = torch.nn.Linear(32, 2)
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
self.configure_sharded_model()
class ModelParallelBoringModelNoSchedulers(ModelParallelBoringModel):
def configure_optimizers(self):
return torch.optim.SGD(self.layer.parameters(), lr=0.1)
class ModelParallelBoringModelManualOptim(BoringModel):
def __init__(self):
super().__init__()
self.layer = None
def training_step(self, batch, batch_idx):
opt = self.optimizers()
output = self(batch)
loss = self.loss(batch, output)
opt.zero_grad()
self.manual_backward(loss)
opt.step()
def configure_sharded_model(self) -> None:
self.layer = torch.nn.Linear(32, 2)
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
self.configure_sharded_model()
@property
def automatic_optimization(self) -> bool:
return False
def test_deepspeed_lightning_module(tmpdir):
"""Test to ensure that a model wrapped in `LightningDeepSpeedModule` moves types and device correctly."""
model = BoringModel()
module = LightningDeepSpeedModule(model, precision=16)
module.half()
assert module.dtype == torch.half
assert model.dtype == torch.half
module.to(torch.double)
assert module.dtype == torch.double
assert model.dtype == torch.double
@RunIf(min_gpus=1)
def test_deepspeed_lightning_module_precision(tmpdir):
"""Test to ensure that a model wrapped in `LightningDeepSpeedModule` moves tensors to half when precision
16."""
model = BoringModel()
module = LightningDeepSpeedModule(model, precision=16)
module.cuda().half()
assert module.dtype == torch.half
assert model.dtype == torch.half
x = torch.randn((1, 32), dtype=torch.float).cuda()
out = module(x)
assert out.dtype == torch.half
module.to(torch.double)
assert module.dtype == torch.double
assert model.dtype == torch.double
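def _deepspeed_trainer_sketch():
    """Hedged sketch (added for illustration, not collected as a test): the minimal Trainer wiring that the
    tests in this file exercise -- DeepSpeed selected via the "deepspeed" strategy alias with 16-bit
    precision on a single GPU. Running it assumes a CUDA device and the deepspeed package are available."""
    model = BoringModel()
    trainer = Trainer(strategy="deepspeed", accelerator="gpu", devices=1, precision=16, fast_dev_run=True)
    return model, trainer  # trainer.fit(model) would launch DeepSpeed training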
@pytest.fixture
def deepspeed_config():
return {
"optimizer": {"type": "SGD", "params": {"lr": 3e-5}},
"scheduler": {
"type": "WarmupLR",
"params": {"last_batch_iteration": -1, "warmup_min_lr": 0, "warmup_max_lr": 3e-5, "warmup_num_steps": 100},
},
}
@pytest.fixture
def deepspeed_zero_config(deepspeed_config):
return {**deepspeed_config, "zero_allow_untested_optimizer": True, "zero_optimization": {"stage": 2}}
@RunIf(deepspeed=True)
@pytest.mark.parametrize("strategy", ("deepspeed", DeepSpeedStrategy))
def test_deepspeed_strategy_string(tmpdir, strategy):
"""Test to ensure that the strategy can be passed via string or instance, and parallel devices is correctly
set."""
trainer = Trainer(
fast_dev_run=True, default_root_dir=tmpdir, strategy=strategy if isinstance(strategy, str) else strategy()
)
assert isinstance(trainer.strategy, DeepSpeedStrategy)
assert trainer.strategy.parallel_devices == [torch.device("cpu")]
@RunIf(deepspeed=True)
def test_deepspeed_strategy_env(tmpdir, monkeypatch, deepspeed_config):
"""Test to ensure that the strategy can be passed via a string with an environment variable."""
config_path = os.path.join(tmpdir, "temp.json")
with open(config_path, "w") as f:
f.write(json.dumps(deepspeed_config))
monkeypatch.setenv("PL_DEEPSPEED_CONFIG_PATH", config_path)
trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir, strategy="deepspeed")
strategy = trainer.strategy
assert isinstance(strategy, DeepSpeedStrategy)
assert strategy.parallel_devices == [torch.device("cpu")]
assert strategy.config == deepspeed_config
@RunIf(deepspeed=True)
@pytest.mark.parametrize("precision", [16, "mixed"])
@pytest.mark.parametrize(
"amp_backend",
["native", pytest.param("apex", marks=RunIf(amp_apex=True))],
)
def test_deepspeed_precision_choice(amp_backend, precision, tmpdir):
"""Test to ensure precision plugin is also correctly chosen.
DeepSpeed handles precision via Custom DeepSpeedPrecisionPlugin
"""
trainer = Trainer(
fast_dev_run=True,
default_root_dir=tmpdir,
accelerator="gpu",
strategy="deepspeed",
amp_backend=amp_backend,
precision=precision,
)
assert isinstance(trainer.strategy, DeepSpeedStrategy)
assert isinstance(trainer.strategy.precision_plugin, DeepSpeedPrecisionPlugin)
assert trainer.strategy.precision_plugin.precision == precision
@RunIf(deepspeed=True)
def test_deepspeed_with_invalid_config_path(tmpdir):
"""Test to ensure if we pass an invalid config path we throw an exception."""
with pytest.raises(
MisconfigurationException, match="You passed in a path to a DeepSpeed config but the path does not exist"
):
DeepSpeedStrategy(config="invalid_path.json")
@RunIf(deepspeed=True)
def test_deepspeed_with_env_path(tmpdir, monkeypatch, deepspeed_config):
"""Test to ensure if we pass an env variable, we load the config from the path."""
config_path = os.path.join(tmpdir, "temp.json")
with open(config_path, "w") as f:
f.write(json.dumps(deepspeed_config))
monkeypatch.setenv("PL_DEEPSPEED_CONFIG_PATH", config_path)
strategy = DeepSpeedStrategy()
assert strategy.config == deepspeed_config
@RunIf(deepspeed=True)
def test_deepspeed_defaults(tmpdir):
"""Ensure that defaults are correctly set as a config for DeepSpeed if no arguments are passed."""
strategy = DeepSpeedStrategy()
assert strategy.config is not None
assert isinstance(strategy.config["zero_optimization"], dict)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_warn_deepspeed_ignored(tmpdir):
class TestModel(BoringModel):
def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:
return loss.backward()
model = TestModel()
trainer = Trainer(
fast_dev_run=True,
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(),
accelerator="gpu",
devices=1,
precision=16,
track_grad_norm=2,
)
from pytorch_lightning.plugins.precision.deepspeed import warning_cache
with pytest.warns(UserWarning, match="will be ignored since DeepSpeed handles the backward"):
trainer.fit(model)
assert any("track_grad_norm=2.0)' but this is not supported" in w for w in warning_cache)
@RunIf(min_gpus=1, deepspeed=True)
@pytest.mark.parametrize(
["dataset_cls", "value"],
[(RandomDataset, "auto"), (RandomDataset, 10), (RandomIterableDataset, "auto"), (RandomIterableDataset, 10)],
)
@mock.patch("deepspeed.init_distributed", autospec=True)
@mock.patch("pytorch_lightning.Trainer.log_dir", new_callable=mock.PropertyMock, return_value="abc")
def test_deepspeed_auto_batch_size_config_select(mock_deepspeed_distributed, mock_log_dir, tmpdir, dataset_cls, value):
"""Test to ensure that the batch size is correctly set as expected for deepspeed logging purposes."""
class TestModel(BoringModel):
def train_dataloader(self):
return DataLoader(dataset_cls(32, 64))
class AssertCallback(Callback):
def setup(self, trainer, pl_module, stage: Optional[str] = None) -> None:
assert isinstance(trainer.strategy, DeepSpeedStrategy)
config = trainer.strategy.config
# int value overrides auto mode
expected_value = value if isinstance(value, int) else 1
if dataset_cls == RandomDataset:
expected_value = pl_module.train_dataloader().batch_size if value == "auto" else value
assert config["train_micro_batch_size_per_gpu"] == expected_value
raise SystemExit
ck = AssertCallback()
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=True,
callbacks=ck,
gpus=1,
strategy=DeepSpeedStrategy(logging_batch_size_per_gpu=value, zero_optimization=False),
)
with pytest.raises(SystemExit):
trainer.fit(model)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_run_configure_optimizers(tmpdir):
"""Test end to end that deepspeed works with defaults (without ZeRO as that requires compilation), whilst using
configure_optimizers for optimizers and schedulers."""
class TestCB(Callback):
def on_train_start(self, trainer, pl_module) -> None:
from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer
assert isinstance(trainer.optimizers[0], FP16_DeepSpeedZeroOptimizer)
assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)
assert isinstance(trainer.lr_scheduler_configs[0].scheduler, torch.optim.lr_scheduler.StepLR)
# check that the lr_scheduler config was preserved
assert trainer.lr_scheduler_configs[0].name == "Sean"
class TestModel(BoringModel):
def configure_optimizers(self):
[optimizer], [scheduler] = super().configure_optimizers()
return {"optimizer": optimizer, "lr_scheduler": {"scheduler": scheduler, "name": "Sean"}}
model = TestModel()
lr_monitor = LearningRateMonitor()
trainer = Trainer(
strategy=DeepSpeedStrategy(), # disable ZeRO so our optimizers are not wrapped
default_root_dir=tmpdir,
accelerator="gpu",
devices=1,
fast_dev_run=True,
precision=16,
callbacks=[TestCB(), lr_monitor],
)
trainer.fit(model)
assert lr_monitor.lrs == {"Sean": [0.1]}
_assert_save_model_is_equal(model, tmpdir, trainer)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_config(tmpdir, deepspeed_zero_config):
"""Test to ensure deepspeed works correctly when passed a DeepSpeed config object including
optimizers/schedulers and saves the model weights to load correctly."""
class TestCB(Callback):
def on_train_start(self, trainer, pl_module) -> None:
from deepspeed.runtime.lr_schedules import WarmupLR
from deepspeed.runtime.zero.stage2 import FP16_DeepSpeedZeroOptimizer
assert isinstance(trainer.optimizers[0], FP16_DeepSpeedZeroOptimizer)
assert isinstance(trainer.optimizers[0].optimizer, torch.optim.SGD)
assert isinstance(trainer.lr_scheduler_configs[0].scheduler, WarmupLR)
assert trainer.lr_scheduler_configs[0].interval == "step"
assert trainer.lr_scheduler_configs[0].opt_idx == 0
model = BoringModel()
lr_monitor = LearningRateMonitor()
trainer = Trainer(
strategy=DeepSpeedStrategy(config=deepspeed_zero_config),
default_root_dir=tmpdir,
accelerator="gpu",
devices=1,
log_every_n_steps=1,
limit_train_batches=4,
limit_val_batches=4,
limit_test_batches=4,
max_epochs=2,
precision=16,
callbacks=[TestCB(), lr_monitor],
)
trainer.fit(model)
trainer.test(model)
assert list(lr_monitor.lrs) == ["lr-SGD"]
assert len(set(lr_monitor.lrs["lr-SGD"])) == 8
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_custom_precision_params(tmpdir):
"""Ensure if we modify the FP16 parameters via the DeepSpeedStrategy, the deepspeed config contains these
changes."""
class TestCB(Callback):
def on_train_start(self, trainer, pl_module) -> None:
assert trainer.strategy.config["fp16"]["loss_scale"] == 10
assert trainer.strategy.config["fp16"]["initial_scale_power"] == 10
assert trainer.strategy.config["fp16"]["loss_scale_window"] == 10
assert trainer.strategy.config["fp16"]["hysteresis"] == 10
assert trainer.strategy.config["fp16"]["min_loss_scale"] == 10
raise SystemExit()
model = BoringModel()
ds = DeepSpeedStrategy(
loss_scale=10, initial_scale_power=10, loss_scale_window=10, hysteresis=10, min_loss_scale=10
)
trainer = Trainer(
default_root_dir=tmpdir, strategy=ds, precision=16, accelerator="gpu", devices=1, callbacks=[TestCB()]
)
with pytest.raises(SystemExit):
trainer.fit(model)
@RunIf(deepspeed=True)
def test_deepspeed_custom_activation_checkpointing_params(tmpdir):
"""Ensure if we modify the activation checkpointing parameters, the deepspeed config contains these changes."""
ds = DeepSpeedStrategy(
partition_activations=True,
cpu_checkpointing=True,
contiguous_memory_optimization=True,
synchronize_checkpoint_boundary=True,
)
checkpoint_config = ds.config["activation_checkpointing"]
assert checkpoint_config["partition_activations"]
assert checkpoint_config["cpu_checkpointing"]
assert checkpoint_config["contiguous_memory_optimization"]
assert checkpoint_config["synchronize_checkpoint_boundary"]
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_custom_activation_checkpointing_params_forwarded(tmpdir):
"""Ensure if we modify the activation checkpointing parameters, we pass these to
deepspeed.checkpointing.configure correctly."""
ds = DeepSpeedStrategy(
partition_activations=True,
cpu_checkpointing=True,
contiguous_memory_optimization=True,
synchronize_checkpoint_boundary=True,
)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
fast_dev_run=1,
strategy=ds,
precision=16,
accelerator="gpu",
devices=1,
)
with mock.patch(
"deepspeed.checkpointing.configure", wraps=deepspeed.checkpointing.configure
) as deepspeed_checkpointing_configure:
trainer.fit(model)
deepspeed_checkpointing_configure.assert_called_with(
mpu_=None, partition_activations=True, contiguous_checkpointing=True, checkpoint_in_cpu=True, profile=None
)
@RunIf(min_gpus=1, deepspeed=True)
def test_deepspeed_assert_config_zero_offload_disabled(tmpdir, deepspeed_zero_config):
"""Ensure if we use a config and turn off offload_optimizer, that this is set to False within the config."""
deepspeed_zero_config["zero_optimization"]["offload_optimizer"] = False
class TestCallback(Callback):
def setup(self, trainer, pl_module, stage=None) -> None:
assert trainer.strategy.config["zero_optimization"]["offload_optimizer"] is False
raise SystemExit()
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
max_epochs=1,
strategy=DeepSpeedStrategy(config=deepspeed_zero_config),
precision=16,
gpus=1,
callbacks=[TestCallback()],
)
with pytest.raises(SystemExit):
trainer.fit(model)
@RunIf(min_gpus=2, standalone=True, deepspeed=True)
def test_deepspeed_multigpu(tmpdir):
"""Test to ensure that DeepSpeed with multiple GPUs works and deepspeed distributed is initialized
correctly."""
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=2,
fast_dev_run=True,
precision=16,
)
with mock.patch("deepspeed.init_distributed", wraps=deepspeed.init_distributed) as mock_deepspeed_distributed:
trainer.fit(model)
mock_deepspeed_distributed.assert_called_once()
trainer.test(model)
_assert_save_model_is_equal(model, tmpdir, trainer)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_fp32_works(tmpdir):
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir, accelerator="gpu", devices=1, strategy="deepspeed_stage_3", fast_dev_run=True
)
trainer.fit(model)
@RunIf(min_gpus=2, standalone=True, deepspeed=True)
def test_deepspeed_stage_3_save_warning(tmpdir):
"""Test to ensure that DeepSpeed Stage 3 gives a warning when saving on rank zero."""
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=2,
fast_dev_run=True,
precision=16,
)
trainer.fit(model)
checkpoint_path = os.path.join(tmpdir, "model.pt")
    # both ranks need to call save_checkpoint; however, only rank 0 needs to check the warning
context_manager = (
pytest.warns(UserWarning, match="each worker will save a shard of the checkpoint within a directory.")
if trainer.is_global_zero
else contextlib.suppress()
)
with context_manager:
trainer.save_checkpoint(checkpoint_path)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_single_file(tmpdir):
"""Test to ensure that DeepSpeed loads from a single file checkpoint."""
model = BoringModel()
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
trainer.save_checkpoint(checkpoint_path)
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=1,
fast_dev_run=True,
precision=16,
)
strategy = trainer.strategy
assert isinstance(strategy, DeepSpeedStrategy)
assert not strategy.load_full_weights
with pytest.raises(MisconfigurationException, match="DeepSpeed was unable to load the checkpoint."):
trainer.test(model, ckpt_path=checkpoint_path)
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3, load_full_weights=True),
accelerator="gpu",
devices=1,
fast_dev_run=True,
precision=16,
)
strategy = trainer.strategy
assert isinstance(strategy, DeepSpeedStrategy)
assert strategy.load_full_weights
trainer.test(model, ckpt_path=checkpoint_path)
class ModelParallelClassificationModel(LightningModule):
def __init__(self, lr: float = 0.01, num_blocks: int = 5):
super().__init__()
self.lr = lr
self.num_blocks = num_blocks
self.prepare_data_per_node = True
self.train_acc = Accuracy()
self.valid_acc = Accuracy()
self.test_acc = Accuracy()
def make_block(self):
return nn.Sequential(nn.Linear(32, 32, bias=False), nn.ReLU())
def configure_sharded_model(self) -> None:
self.model = nn.Sequential(*(self.make_block() for x in range(self.num_blocks)), nn.Linear(32, 3))
def forward(self, x):
x = self.model(x)
# Ensure output is in float32 for softmax operation
x = x.float()
logits = F.softmax(x, dim=1)
return logits
def training_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.cross_entropy(logits, y)
self.log("train_loss", loss, prog_bar=True)
self.log("train_acc", self.train_acc(logits, y), prog_bar=True, sync_dist=True)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
self.log("val_loss", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)
self.log("val_acc", self.valid_acc(logits, y), prog_bar=True, sync_dist=True)
def test_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
self.log("test_loss", F.cross_entropy(logits, y), prog_bar=False, sync_dist=True)
self.log("test_acc", self.test_acc(logits, y), prog_bar=True, sync_dist=True)
def predict_step(self, batch, batch_idx, dataloader_idx=0):
x, y = batch
logits = self.forward(x)
self.test_acc(logits, y)
return self.test_acc.compute()
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
return [optimizer], [{"scheduler": lr_scheduler, "interval": "step"}]
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
if not hasattr(self, "model"):
self.configure_sharded_model()
# Lightning saves the lr schedulers, but DeepSpeed saves the optimizer states separately
assert len(checkpoint["lr_schedulers"]) == 1
assert "optimizer_states" not in checkpoint
class ManualModelParallelClassificationModel(ModelParallelClassificationModel):
@property
def automatic_optimization(self) -> bool:
return False
def training_step(self, batch, batch_idx):
x, y = batch
logits = self.forward(x)
loss = F.cross_entropy(logits, y)
opt = self.optimizers()
self.log("train_loss", loss, prog_bar=True)
self.log("train_acc", self.train_acc(logits, y), prog_bar=True, sync_dist=True)
opt.zero_grad()
self.manual_backward(loss)
opt.step()
@RunIf(min_gpus=2, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_stage_3(tmpdir, deepspeed_config):
"""Test to ensure ZeRO Stage 3 works with a parallel model."""
model = ModelParallelBoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=2,
fast_dev_run=True,
precision=16,
)
trainer.fit(model)
trainer.test(model)
_assert_save_model_is_equal(model, tmpdir, trainer)
@RunIf(min_gpus=2, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_stage_3_manual_optimization(tmpdir, deepspeed_config):
"""Test to ensure ZeRO Stage 3 works with a parallel model."""
model = ModelParallelBoringModelManualOptim()
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=2,
fast_dev_run=True,
precision=16,
)
trainer.fit(model)
trainer.test(model)
_assert_save_model_is_equal(model, tmpdir, trainer)
@pytest.mark.parametrize(("accumulate_grad_batches", "automatic_optimization"), [(1, False), (2, True)])
@RunIf(min_gpus=2, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_stage_3_checkpointing(tmpdir, automatic_optimization, accumulate_grad_batches):
seed_everything(1)
if automatic_optimization:
model = ModelParallelClassificationModel()
else:
model = ManualModelParallelClassificationModel()
dm = ClassifDataModule()
ck = ModelCheckpoint(monitor="val_acc", mode="max", save_last=True, save_top_k=-1)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=10,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=2,
precision=16,
accumulate_grad_batches=accumulate_grad_batches,
callbacks=[ck],
)
trainer.fit(model, datamodule=dm)
results = trainer.test(datamodule=dm)
assert results[0]["test_acc"] > 0.7
saved_results = trainer.test(ckpt_path=ck.best_model_path, datamodule=dm)
assert saved_results[0]["test_acc"] > 0.7
assert saved_results == results
if automatic_optimization:
model = ModelParallelClassificationModel()
else:
model = ManualModelParallelClassificationModel()
trainer = Trainer(default_root_dir=tmpdir, gpus=2, strategy=DeepSpeedStrategy(stage=3), precision=16)
results = trainer.test(model, datamodule=dm, ckpt_path=ck.best_model_path)
assert results[0]["test_acc"] > 0.7
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_stage_3_warns_resume_training(tmpdir):
"""Test to ensure with Stage 3 and multiple GPUs that we can resume from training, throwing a warning that the
optimizer state and scheduler states cannot be restored."""
dm = ClassifDataModule()
model = BoringModel()
checkpoint_path = os.path.join(tmpdir, "model.pt")
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
trainer.save_checkpoint(checkpoint_path)
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=True,
strategy=DeepSpeedStrategy(stage=3, load_full_weights=True),
accelerator="gpu",
devices=1,
precision=16,
)
with pytest.warns(
UserWarning,
match="A single checkpoint file has been given. This means optimizer states cannot be restored. "
"If you'd like to restore these states, you must "
"provide a path to the originally saved DeepSpeed checkpoint.",
):
trainer.fit(model, datamodule=dm, ckpt_path=checkpoint_path)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_stage_3_resume_training(tmpdir):
"""Test to ensure with Stage 3 and single GPU that we can resume training."""
initial_model = ModelParallelClassificationModel()
dm = ClassifDataModule()
ck = ModelCheckpoint(monitor="val_acc", mode="max", save_last=True, save_top_k=-1)
initial_trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=1,
precision=16,
callbacks=[ck],
enable_progress_bar=False,
enable_model_summary=False,
)
initial_trainer.fit(initial_model, datamodule=dm)
class TestCallback(Callback):
def on_train_batch_start(
self, trainer: Trainer, pl_module: LightningModule, batch: Any, batch_idx: int
) -> None:
original_deepspeed_strategy = initial_trainer.strategy
current_deepspeed_strategy = trainer.strategy
assert isinstance(original_deepspeed_strategy, DeepSpeedStrategy)
assert isinstance(current_deepspeed_strategy, DeepSpeedStrategy)
            # assert optimizer states are correctly loaded
original_optimizer_dict = original_deepspeed_strategy.deepspeed_engine.optimizer.state_dict()
current_optimizer_dict = current_deepspeed_strategy.deepspeed_engine.optimizer.state_dict()
for orig_tensor, current_tensor in zip(
original_optimizer_dict["fp32_flat_groups"], current_optimizer_dict["fp32_flat_groups"]
):
assert torch.all(orig_tensor.eq(current_tensor))
# assert model state is loaded correctly
for current_param, initial_param in zip(pl_module.parameters(), initial_model.parameters()):
assert torch.equal(current_param.cpu(), initial_param.cpu())
# assert epoch has correctly been restored
assert trainer.current_epoch == 1
# assert lr-scheduler states are loaded correctly
original_lr_scheduler = initial_trainer.lr_scheduler_configs[0].scheduler
current_lr_scheduler = trainer.lr_scheduler_configs[0].scheduler
assert original_lr_scheduler.state_dict() == current_lr_scheduler.state_dict()
model = ModelParallelClassificationModel()
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=True,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=1,
precision=16,
callbacks=TestCallback(),
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model, datamodule=dm, ckpt_path=ck.best_model_path)
@pytest.mark.parametrize("offload_optimizer", [False, True])
@RunIf(min_gpus=2, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_stage_2_accumulated_grad_batches(tmpdir, offload_optimizer):
"""Test to ensure with Stage 2 and multiple GPUs, accumulated grad batches works."""
seed_everything(42)
class VerificationCallback(Callback):
def __init__(self):
self.on_train_batch_start_called = False
def on_train_batch_start(self, trainer, pl_module: LightningModule, batch: Any, batch_idx: int) -> None:
deepspeed_engine = trainer.strategy.model
assert trainer.global_step == deepspeed_engine.global_steps
self.on_train_batch_start_called = True
model = ModelParallelClassificationModel()
dm = ClassifDataModule()
verification_callback = VerificationCallback()
trainer = Trainer(
default_root_dir=tmpdir,
enable_progress_bar=False,
# TODO: this test fails with max_epochs >1 as there are leftover batches per epoch.
# there's divergence in how Lightning handles the last batch of the epoch with how DeepSpeed does it.
# we step the optimizers on the last batch but DeepSpeed keeps the accumulation for the next epoch
max_epochs=1,
strategy=DeepSpeedStrategy(stage=2, offload_optimizer=offload_optimizer),
accelerator="gpu",
devices=2,
limit_train_batches=5,
limit_val_batches=2,
precision=16,
accumulate_grad_batches=2,
callbacks=[verification_callback],
)
assert trainer.limit_train_batches % trainer.accumulate_grad_batches != 0, "leftover batches should be tested"
trainer.fit(model, datamodule=dm)
assert verification_callback.on_train_batch_start_called
@RunIf(min_gpus=2, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_test(tmpdir):
"""Test to ensure we can use DeepSpeed with just test using ZeRO Stage 3."""
model = ModelParallelBoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=2,
fast_dev_run=True,
precision=16,
)
trainer.test(model)
# TODO(Sean): Once partial parameter partitioning is supported this test should be re-enabled
@pytest.mark.skip("Partial parameter partitioning for DeepSpeed is currently broken.")
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_partial_partition_parameters(tmpdir):
"""Test to ensure that a module that defines a layer inside the ``__init__`` and ``configure_sharded_model``
correctly converts all parameters to float16 when ``precision=16`` and runs successfully."""
class TestModel(ModelParallelBoringModel):
def __init__(self):
super().__init__()
self.layer_2 = torch.nn.Linear(32, 32)
def configure_sharded_model(self) -> None:
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
x = self.layer_2(x)
return self.layer(x)
def on_train_epoch_start(self) -> None:
assert all([x.dtype == torch.float16 for x in self.parameters()])
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=1,
fast_dev_run=True,
precision=16,
)
trainer.fit(model)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_test_rnn(tmpdir):
"""Test to ensure that turning off explicit partitioning of the entire module for ZeRO Stage 3 works when
training with certain layers which will crash with explicit partitioning."""
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.rnn = torch.nn.GRU(32, 32)
def on_train_epoch_start(self) -> None:
assert all([x.dtype == torch.float16 for x in self.parameters()])
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=1,
fast_dev_run=True,
precision=16,
)
trainer.fit(model)
@RunIf(deepspeed=True)
@mock.patch("deepspeed.init_distributed", autospec=True)
@pytest.mark.parametrize("platform", ["Linux", "Windows"])
def test_deepspeed_strategy_env_variables(mock_deepspeed_distributed, tmpdir, platform):
"""Test to ensure that we setup distributed communication using correctly.
When using windows, ranks environment variables should not be set, and deepspeed should handle this.
"""
trainer = Trainer(default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3))
strategy = trainer.strategy
assert isinstance(strategy, DeepSpeedStrategy)
with mock.patch("platform.system", return_value=platform) as mock_platform:
strategy._init_deepspeed_distributed()
mock_deepspeed_distributed.assert_called()
mock_platform.assert_called()
if platform == "Windows":
# assert no env variables have been set within the DeepSpeedStrategy
assert all(k not in os.environ for k in ("MASTER_PORT", "MASTER_ADDR", "RANK", "WORLD_SIZE", "LOCAL_RANK"))
else:
assert os.environ["MASTER_ADDR"] == str(trainer.strategy.cluster_environment.main_address)
assert os.environ["MASTER_PORT"] == str(trainer.strategy.cluster_environment.main_port)
assert os.environ["RANK"] == str(trainer.strategy.global_rank)
assert os.environ["WORLD_SIZE"] == str(trainer.strategy.world_size)
assert os.environ["LOCAL_RANK"] == str(trainer.strategy.local_rank)
def _assert_save_model_is_equal(model, tmpdir, trainer):
checkpoint_path = os.path.join(tmpdir, "model.pt")
checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
trainer.save_checkpoint(checkpoint_path)
trainer.strategy.barrier()
# carry out the check only on rank 0
if trainer.is_global_zero:
single_ckpt_path = os.path.join(tmpdir, "single_model.pt")
convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, single_ckpt_path)
state_dict = torch.load(single_ckpt_path)
model = model.cpu()
# Assert model parameters are identical after loading
for orig_param, saved_model_param in zip(model.parameters(), state_dict.values()):
if model.dtype == torch.half:
                # the model runs in fp16, so cast the saved fp32 weights to half before comparing
saved_model_param = saved_model_param.half()
assert torch.equal(orig_param, saved_model_param)
@RunIf(min_gpus=2, standalone=True, deepspeed=True)
def test_deepspeed_multigpu_no_schedulers(tmpdir):
"""Test to ensure ZeRO Stage 3 works with a parallel model and no schedulers."""
model = ModelParallelBoringModelNoSchedulers()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=2,
fast_dev_run=True,
precision=16,
)
trainer.fit(model)
_assert_save_model_is_equal(model, tmpdir, trainer)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_skip_backward_raises(tmpdir):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
return None
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(),
accelerator="gpu",
devices=1,
fast_dev_run=True,
precision=16,
)
with pytest.raises(MisconfigurationException, match="returning `None` .* is not supported"):
trainer.fit(model)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_setup_train_dataloader(tmpdir):
"""Test DeepSpeed works when setup is required to call in the DataModule."""
class TestSetupIsCalledDataModule(LightningDataModule):
def __init__(self):
super().__init__()
self._setup = False
def setup(self, stage: Optional[str] = None) -> None:
self._setup = True
def train_dataloader(self):
assert self._setup
return DataLoader(RandomDataset(32, 64), batch_size=2)
def val_dataloader(self):
assert self._setup
return DataLoader(RandomDataset(32, 64), batch_size=2)
def test_dataloader(self):
assert self._setup
return DataLoader(RandomDataset(32, 64), batch_size=2)
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(logging_level=logging.INFO),
accelerator="gpu",
devices=1,
fast_dev_run=True,
)
dm = TestSetupIsCalledDataModule()
with mock.patch("deepspeed.utils.logging.logger.warning", autospec=True) as mock_object:
trainer.fit(model, datamodule=dm)
assert any("Tried to infer the batch size" in str(arg) for arg in mock_object.call_args_list)
@mock.patch("torch.optim.lr_scheduler.StepLR.step", autospec=True)
@pytest.mark.parametrize("interval", ["step", "epoch"])
@pytest.mark.parametrize("max_epoch", [2])
@pytest.mark.parametrize("limit_train_batches", [2])
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_scheduler_step_count(mock_step, max_epoch, limit_train_batches, interval):
"""Test to ensure that the scheduler is called the correct amount of times during training when scheduler is
set to step or epoch."""
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)
return {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": scheduler, "interval": interval},
}
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=limit_train_batches,
limit_val_batches=0,
max_epochs=max_epoch,
accelerator="gpu",
devices=1,
strategy="deepspeed",
)
trainer.fit(model)
if interval == "epoch":
        # called once at init and then once per epoch during training
assert mock_step.call_count == 1 + max_epoch
else:
        # called once at init and then once per training batch (step interval)
assert mock_step.call_count == 1 + (max_epoch * limit_train_batches)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_configure_gradient_clipping(tmpdir):
"""Test to ensure that a warning is raised when `LightningModule.configure_gradient_clipping` is overridden in
case of deepspeed."""
class TestModel(BoringModel):
def configure_gradient_clipping(self, optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm):
if optimizer_idx == 0:
self.clip_gradients(optimizer, gradient_clip_val, gradient_clip_algorithm)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
accelerator="gpu",
devices=1,
strategy="deepspeed",
fast_dev_run=True,
)
with pytest.warns(UserWarning, match="handles gradient clipping internally"):
trainer.fit(model)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_deepspeed_gradient_clip_by_value(tmpdir):
"""Test to ensure that an exception is raised when using `gradient_clip_algorithm='value'`."""
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
accelerator="gpu",
devices=1,
strategy="deepspeed",
gradient_clip_algorithm="value",
)
with pytest.raises(MisconfigurationException, match="does not support clipping gradients by value"):
trainer.fit(model)
@RunIf(min_gpus=1, standalone=True, deepspeed=True)
def test_different_accumulate_grad_batches_fails(tmpdir):
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir, accumulate_grad_batches={1: 2}, accelerator="gpu", devices=1, strategy="deepspeed"
)
with pytest.raises(
MisconfigurationException, match="DeepSpeed currently does not support different `accumulate_grad_batches`"
):
trainer.fit(model)
@RunIf(min_gpus=2, standalone=True, deepspeed=True)
def test_specific_gpu_device_id(tmpdir):
class TestCallback(Callback):
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
assert model.device.index == 1
def on_train_batch_start(
self,
trainer: Trainer,
pl_module: LightningModule,
batch: Any,
batch_idx: int,
) -> None:
assert batch.device.index == 1
def on_test_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
assert model.device.index == 1
def on_test_batch_start(
self,
trainer: Trainer,
pl_module: LightningModule,
batch: Any,
batch_idx: int,
dataloader_idx: int,
) -> None:
assert batch.device.index == 1
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=True,
accelerator="gpu",
devices=[1],
strategy="deepspeed",
callbacks=TestCallback(),
)
trainer.fit(model)
trainer.test(model)
@RunIf(min_gpus=2, min_torch="1.10.0", standalone=True, deepspeed=True)
def test_deepspeed_with_meta_device(tmpdir):
with init_meta_context():
model = BoringModel()
assert model.layer.weight.device.type == "meta"
trainer = Trainer(
default_root_dir=tmpdir,
strategy=DeepSpeedStrategy(stage=3),
accelerator="gpu",
devices=2,
fast_dev_run=True,
precision=16,
)
trainer.fit(model)
assert model.layer.weight.device.type == "cpu"
@RunIf(min_gpus=2, standalone=True, deepspeed=True)
def test_deepspeed_multi_save_same_filepath(tmpdir):
"""Test that verifies that deepspeed saves only latest checkpoint in the specified path and deletes the old
sharded checkpoints."""
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy="deepspeed",
accelerator="gpu",
devices=2,
callbacks=[ModelCheckpoint(save_top_k=1, save_last=True)],
limit_train_batches=1,
limit_val_batches=0,
num_sanity_val_steps=0,
max_epochs=2,
)
trainer.fit(model)
ckpt_path = os.path.join(trainer.checkpoint_callback.dirpath, "last.ckpt")
expected = ["latest", "zero_to_fp32.py", "checkpoint"]
assert set(expected) == set(os.listdir(ckpt_path))
| [
"torch.nn.Linear",
"torch.device",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.GRU",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.nn.ReLU",
"torch.nn.functional.cross_entropy",
"torch.load",
"torch.nn.functional.softmax",
"torch.equal",
"torch.randn"
] | 1.8 | neptune-ml/pytorch-lightning | 3bcaed52454f3e6c3bce5513032e34302e5b1bb6 |
1.8 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Dict, Iterator
from unittest import mock
from unittest.mock import ANY
import pytest
import torch
from torch.utils.data.dataloader import _MultiProcessingDataLoaderIter, DataLoader
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import Callback, ModelCheckpoint
from pytorch_lightning.loops import EvaluationLoop, Loop, TrainingBatchLoop, TrainingEpochLoop
from pytorch_lightning.trainer.progress import BaseProgress
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
class NestedLoop(Loop):
def __init__(self):
super().__init__()
self.child_loop0 = None
self.child_loop1 = None
@property
def done(self) -> bool:
return False
def connect(self, child0, child1):
self.child_loop0 = child0
self.child_loop1 = child1
def reset(self) -> None:
pass
def advance(self, *args, **kwargs):
pass
@pytest.mark.parametrize("loop_name", ["fit_loop", "validate_loop", "test_loop", "predict_loop"])
def test_connect_loops_direct(loop_name):
"""Test Trainer references in loops on assignment."""
loop = NestedLoop()
with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
_ = loop.trainer
trainer = Trainer()
# trainer.loop_name = loop
setattr(trainer, loop_name, loop)
assert loop.trainer is trainer
def test_connect_loops_recursive():
"""Test Trainer references in a nested loop assigned to a Trainer."""
main_loop = NestedLoop()
child0 = NestedLoop()
child1 = NestedLoop()
main_loop.connect(child0, child1)
with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
_ = main_loop.trainer
with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
_ = main_loop.child_loop0.trainer
trainer = Trainer()
trainer.fit_loop = main_loop
assert child0.trainer is trainer
assert child1.trainer is trainer
def test_restarting_loops_recursive():
class MyLoop(NestedLoop):
def __init__(self, loop=None):
super().__init__()
self.child = loop
loop = MyLoop(MyLoop(MyLoop()))
assert not loop.restarting
assert not loop.child.restarting
assert not loop.child.child.restarting
loop.restarting = True
assert loop.restarting
assert loop.child.restarting
assert loop.child.child.restarting
def test_connect_subloops(tmpdir):
"""Test connecting individual subloops by calling `trainer.x.y.connect()`"""
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
epoch_loop = trainer.fit_loop.epoch_loop
new_batch_loop = TrainingBatchLoop()
epoch_loop.connect(batch_loop=new_batch_loop)
assert epoch_loop.batch_loop is new_batch_loop
with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
_ = new_batch_loop.trainer
trainer.fit(model)
assert new_batch_loop.trainer is trainer
def test_replace_loops():
class TestLoop(TrainingEpochLoop):
def __init__(self, foo):
super().__init__()
trainer = Trainer(min_steps=123, max_steps=321)
with pytest.raises(
MisconfigurationException, match=r"FitLoop.replace\(TestLoop\)`.*`__init__`.*`TrainingEpochLoop`"
):
trainer.fit_loop.replace(epoch_loop=TestLoop)
class TestLoop(TrainingEpochLoop):
...
# test passing a loop where previous state should be connected
old_loop = trainer.fit_loop.epoch_loop
trainer.fit_loop.replace(epoch_loop=TestLoop)
new_loop = trainer.fit_loop.epoch_loop
assert isinstance(new_loop, TestLoop)
assert trainer.fit_loop.epoch_loop is new_loop
assert new_loop.min_steps == 123
assert new_loop.max_steps == 321
assert new_loop.batch_loop is old_loop.batch_loop
assert new_loop.val_loop is old_loop.val_loop
assert new_loop.trainer is trainer
class MyBatchLoop(TrainingBatchLoop):
...
class MyEvalLoop(EvaluationLoop):
...
# test passing more than one where one is an instance and the other a class
trainer.fit_loop.epoch_loop.replace(batch_loop=MyBatchLoop, val_loop=MyEvalLoop())
new_batch_loop = trainer.fit_loop.epoch_loop.batch_loop
new_val_loop = trainer.fit_loop.epoch_loop.val_loop
assert isinstance(new_batch_loop, MyBatchLoop)
assert isinstance(new_val_loop, MyEvalLoop)
class CustomException(Exception):
pass
def test_loop_restore():
class Simple(Loop):
def __init__(self, dataset: Iterator):
super().__init__()
self.iteration_count = 0
self.dataset = dataset
@property
def skip(self) -> bool:
return False
@property
def done(self) -> bool:
return self.iteration_count > len(self.dataset)
def reset(self) -> None:
self.iter_dataset = iter(self.dataset)
if self.restarting:
for _ in range(self.iteration_count):
next(self.iter_dataset)
self.iteration_count += 1
else:
self.outputs = []
def advance(self) -> None:
value = next(self.iter_dataset)
if self.iteration_count == 5:
raise CustomException
self.outputs.append(value)
def on_advance_end(self) -> None:
self.iteration_count += 1
def state_dict(self) -> Dict:
return {"iteration_count": self.iteration_count, "outputs": self.outputs}
def load_state_dict(self, state_dict: Dict) -> None:
self.iteration_count = state_dict["iteration_count"]
self.outputs = state_dict["outputs"]
trainer = Trainer()
data = range(10)
loop = Simple(data)
loop.trainer = trainer
try:
loop.run()
state_dict = {}
except CustomException:
state_dict = loop.state_dict()
loop = Simple(data)
loop.trainer = trainer
loop.load_state_dict(state_dict)
loop.restarting = True
loop.run()
assert not loop.restarting
assert loop.outputs == list(range(10))
def test_loop_hierarchy():
@dataclass
class SimpleProgress(BaseProgress):
increment: int = 0
class Simple(Loop):
def __init__(self, a):
super().__init__()
self.a = a
self.progress = SimpleProgress()
def advance(self, *args: Any, **kwargs: Any) -> None:
loop = getattr(self, "loop_child", None)
if not loop:
return
loop.run()
def on_advance_end(self):
self.progress.increment += 1
@property
def done(self) -> bool:
return self.progress.increment > 0
def reset(self) -> None:
...
def on_save_checkpoint(self) -> Dict:
return {"a": self.a}
def on_load_checkpoint(self, state_dict: Dict) -> None:
self.a = state_dict["a"]
loop_parent = Simple(1)
loop_child = Simple(2)
loop_parent.loop_child = loop_child
# check the trainer reference is propagated
loop_parent.trainer = Trainer()
assert loop_child.trainer is loop_parent.trainer
state_dict = loop_parent.state_dict()
assert state_dict == {
"state_dict": {"a": 1},
"progress": {"increment": 0},
"loop_child.state_dict": {"a": 2},
"loop_child.progress": {"increment": 0},
}
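    # note (added for clarity): each child loop's state is flattened into the parent's
    # state dict under dotted keys derived from the attribute name, e.g. the `loop_child`
    # attribute above yields "loop_child.state_dict" and "loop_child.progress".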
state_dict["loop_child.state_dict"]["a"] = 3
# check restarting after `load_state_dict`
loop_parent.load_state_dict(state_dict)
assert loop_parent.restarting
loop_parent.run()
# check the new state after `run`
state_dict = loop_parent.state_dict()
assert state_dict == {
"state_dict": {"a": 1},
"progress": {"increment": 1},
"loop_child.state_dict": {"a": 3},
"loop_child.progress": {"increment": 1},
}
loop_parent_copy = deepcopy(loop_parent)
assert loop_parent_copy.state_dict() == loop_parent.state_dict()
assert loop_parent_copy.on_save_checkpoint() == state_dict["state_dict"]
assert loop_parent_copy.loop_child.on_save_checkpoint() == state_dict["loop_child.state_dict"]
loop_parent = Simple(1)
loop_child = Simple(2)
loop_parent.loop_child = loop_child
loop_parent.load_state_dict(state_dict)
assert loop_parent.progress.increment == 1
assert loop_parent.loop_child.progress.increment == 1
del loop_parent.loop_child
state_dict = loop_parent.state_dict()
assert state_dict == {"state_dict": {"a": 1}, "progress": {"increment": 1}}
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize("stop_epoch", (1, 2))
@pytest.mark.parametrize("stop_batch", (1, 2))
@pytest.mark.parametrize("n_dataloaders,stop_dataloader", [(2, 0), (2, 1), (3, 2)])
def test_loop_restart_progress_multiple_dataloaders(tmpdir, n_dataloaders, stop_dataloader, stop_epoch, stop_batch):
n_batches = 5
n_epochs = 3
class ValidationModel(BoringModel):
def __init__(self):
super().__init__()
def validation_step(self, batch, batch_idx, dataloader_idx):
if self.current_epoch == stop_epoch and batch_idx == stop_batch and dataloader_idx == stop_dataloader:
raise CustomException
return super().validation_step(batch, batch_idx)
def val_dataloader(self):
return [super(ValidationModel, self).val_dataloader() for _ in range(n_dataloaders)]
model = ValidationModel()
model.validation_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=n_epochs,
limit_train_batches=1,
limit_val_batches=n_batches,
)
# simulate a failure
with pytest.raises(CustomException):
trainer.fit(model)
ckpt_path = str(tmpdir / ".pl_auto_save.ckpt")
checkpoint = torch.load(ckpt_path)["loops"]["fit_loop"]
total_dataloader = stop_epoch * n_dataloaders + stop_dataloader
expected = {
"total": {"ready": total_dataloader + 1, "completed": total_dataloader},
"current": {"ready": stop_dataloader + 1, "completed": stop_dataloader},
}
assert checkpoint["epoch_loop.val_loop.dataloader_progress"] == expected
trainer.fit_loop.load_state_dict(checkpoint)
# `nbe_`: non-breaking epoch, as in, no exception will be raised. `be_`: breaking epoch
# the fit-validation total batch progress is reset per epoch so it's not counted for the total value.
nbe_total_val_batch = 0 # stop_epoch * n_dataloaders * n_batches
be_total_val_batch = stop_dataloader * n_batches + stop_batch
total_val_batch = nbe_total_val_batch + be_total_val_batch
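    # worked example for one of the parametrisations above: with n_batches=5, stop_epoch=1,
    # n_dataloaders=2, stop_dataloader=0 and stop_batch=2, earlier epochs contribute nothing
    # (the counter is reset per epoch) and the breaking epoch contributes 0 * 5 + 2 = 2,
    # so total_val_batch == 2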
expected = {
"total": {
"ready": total_val_batch + 1,
"started": total_val_batch + 1,
"processed": total_val_batch,
"completed": total_val_batch,
},
"current": {
"ready": stop_batch + 1,
"started": stop_batch + 1,
"processed": stop_batch,
"completed": stop_batch,
},
"is_last_batch": False,
}
assert trainer.fit_loop.epoch_loop.val_loop.epoch_loop.batch_progress.state_dict() == expected
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize("accumulate_grad_batches", (1, 2, 3))
@pytest.mark.parametrize("n_optimizers", (1, 3, 5))
@pytest.mark.parametrize("stop_epoch", (1, 2))
@pytest.mark.parametrize("stop_batch", (1, 2))
@pytest.mark.parametrize("stop_optimizer", (1, 2))
def test_loop_state_on_exception(accumulate_grad_batches, stop_epoch, stop_batch, stop_optimizer, n_optimizers, tmpdir):
stop_optimizer = stop_optimizer if stop_optimizer < n_optimizers else 0
n_epochs = 3
n_batches = 3
class TestModel(BoringModel):
def __init__(self):
super().__init__()
if n_optimizers > 1:
self.configure_optimizers = self.configure_optimizers_multiple
def training_step(self, batch, batch_idx, optimizer_idx=0):
if self.trainer.current_epoch == stop_epoch and batch_idx == stop_batch and optimizer_idx == stop_optimizer:
raise CustomException
return super().training_step(batch, batch_idx)
def configure_optimizers_multiple(self):
optimizers = [torch.optim.Adam(self.layer.parameters(), lr=0.1) for _ in range(n_optimizers)]
lr_scheduler_0 = torch.optim.lr_scheduler.StepLR(optimizers[0], step_size=1)
lr_scheduler_1 = torch.optim.lr_scheduler.StepLR(optimizers[1], step_size=1)
# no scheduler for optimizer_2
lr_schedulers = [lr_scheduler_0, {"scheduler": lr_scheduler_1, "interval": "step"}]
return optimizers, lr_schedulers
model = TestModel()
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=n_epochs,
limit_train_batches=n_batches,
limit_val_batches=0,
accumulate_grad_batches=accumulate_grad_batches,
enable_progress_bar=False,
logger=False,
enable_checkpointing=False,
)
# simulate a failure
with pytest.raises(CustomException):
trainer.fit(model)
ckpt_path = str(tmpdir / ".pl_auto_save.ckpt")
assert os.path.exists(ckpt_path)
checkpoint = torch.load(ckpt_path)
optim_progress = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.optim_progress
sch_progress = trainer.fit_loop.epoch_loop.scheduler_progress
# `nbe_`: non-breaking epoch, as in, no exception will be raised. `be_`: breaking epoch
nbe_batches_completed = stop_epoch * n_batches
be_batches_completed = stop_batch
be_batches_ready = stop_batch + 1
# lightning applies leftover accumulated gradients when the epoch ends
has_leftover_accumulation_batches = n_batches % accumulate_grad_batches != 0
# number of batches that will call `optimizer.step()` during non-breaking and breaking epochs
nbe_stepping_batches = nbe_batches_completed // accumulate_grad_batches
be_stepping_batches = be_batches_completed // accumulate_grad_batches
nbe_total_opt_steps = (nbe_stepping_batches + has_leftover_accumulation_batches) * n_optimizers
does_last_be_batch_step = be_batches_ready % accumulate_grad_batches == 0 or has_leftover_accumulation_batches
be_total_opt_steps = be_stepping_batches * n_optimizers + does_last_be_batch_step * stop_optimizer
assert optim_progress.optimizer_steps == nbe_total_opt_steps + be_total_opt_steps
assert optim_progress.optimizer.step.current.completed == be_total_opt_steps
has_opt_stepped_in_be = stop_batch + 1 >= accumulate_grad_batches
nbe_total_zero_grad = (nbe_stepping_batches + has_leftover_accumulation_batches) * n_optimizers
does_last_be_batch_zero_grad = be_batches_completed % accumulate_grad_batches == 0
# `max` because the first batch always zero-grads
be_total_zero_grad = max(1, be_stepping_batches) * n_optimizers + stop_optimizer * does_last_be_batch_zero_grad
assert optim_progress.optimizer.zero_grad.total.completed == nbe_total_zero_grad + be_total_zero_grad
assert optim_progress.optimizer.zero_grad.current.completed == be_total_zero_grad
nbe_sch_steps = stop_epoch
be_sch_steps = 0 # the current epoch did not complete
if n_optimizers > 1:
# assumes that the scheduler config is unchanged
# `* 1` because there is only one step-level scheduler
nbe_sch_steps = stop_epoch + nbe_stepping_batches + has_leftover_accumulation_batches * 1
# `0 +` for the epoch-level scheduler
be_sch_steps = 0 + be_stepping_batches
assert sch_progress.total.completed == nbe_sch_steps + be_sch_steps
assert sch_progress.current.completed == be_sch_steps
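    # worked example for one of the parametrisations above (n_batches=3, accumulate_grad_batches=2,
    # n_optimizers=1, stop_epoch=1, stop_batch=1, stop_optimizer -> 0):
    #   nbe_batches_completed = 3, be_batches_completed = 1, has_leftover_accumulation_batches = True
    #   nbe_stepping_batches = 3 // 2 = 1, be_stepping_batches = 1 // 2 = 0
    #   nbe_total_opt_steps = (1 + 1) * 1 = 2, be_total_opt_steps = 0, so optimizer_steps == 2
    #   nbe_total_zero_grad = 2, be_total_zero_grad = max(1, 0) * 1 = 1, so zero_grad completed == 3
    #   with a single optimizer the scheduler is epoch-level only: nbe_sch_steps = 1, be_sch_steps = 0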
expected = {
"state_dict": ANY,
"epoch_progress": {
"total": {
"ready": stop_epoch + 1,
"started": stop_epoch + 1,
"processed": stop_epoch,
"completed": stop_epoch,
},
"current": {
"ready": stop_epoch + 1,
"started": stop_epoch + 1,
"processed": stop_epoch,
"completed": stop_epoch,
},
},
"epoch_loop.state_dict": ANY,
"epoch_loop.batch_progress": {
"total": {
"ready": nbe_batches_completed + be_batches_completed + 1,
"started": nbe_batches_completed + be_batches_completed + 1,
"processed": nbe_batches_completed + be_batches_completed,
"completed": nbe_batches_completed + be_batches_completed,
},
"current": {
"ready": stop_batch + 1,
"started": stop_batch + 1,
"processed": stop_batch,
"completed": stop_batch,
},
"is_last_batch": False,
},
"epoch_loop.scheduler_progress": {
"total": {"ready": nbe_sch_steps + be_sch_steps, "completed": nbe_sch_steps + be_sch_steps},
"current": {"ready": be_sch_steps, "completed": be_sch_steps},
},
"epoch_loop.batch_loop.state_dict": ANY,
"epoch_loop.batch_loop.manual_loop.state_dict": ANY,
"epoch_loop.batch_loop.manual_loop.optim_step_progress": {
"total": {"ready": 0, "completed": 0},
"current": {"ready": 0, "completed": 0},
},
"epoch_loop.batch_loop.optimizer_loop.state_dict": {},
"epoch_loop.batch_loop.optimizer_loop.optim_progress": {
"optimizer_position": stop_optimizer,
"optimizer": {
"step": {
"total": {
"ready": nbe_total_opt_steps + be_total_opt_steps + has_opt_stepped_in_be,
"completed": nbe_total_opt_steps + be_total_opt_steps,
},
"current": {"ready": be_total_opt_steps + has_opt_stepped_in_be, "completed": be_total_opt_steps},
},
"zero_grad": {
"total": {
"ready": nbe_total_zero_grad + be_total_zero_grad,
"started": nbe_total_zero_grad + be_total_zero_grad,
"completed": nbe_total_zero_grad + be_total_zero_grad,
},
"current": {
"ready": be_total_zero_grad,
"started": be_total_zero_grad,
"completed": be_total_zero_grad,
},
},
},
},
"epoch_loop.val_loop.state_dict": ANY,
"epoch_loop.val_loop.dataloader_progress": ANY,
"epoch_loop.val_loop.epoch_loop.state_dict": ANY,
"epoch_loop.val_loop.epoch_loop.batch_progress": ANY,
"epoch_loop.val_loop._results": ANY,
"epoch_loop._results": ANY,
}
assert checkpoint["loops"]["fit_loop"] == expected
trainer.fit_loop.load_state_dict(checkpoint["loops"]["fit_loop"])
state_dict = trainer.fit_loop.state_dict()
# need to remove these elements for comparison; comparing with `fit_loop.state_dict()` would require the
# fit loop to have an iterator, which is only available during training
state_dict["epoch_loop.state_dict"]["dataloader_state_dict"] = ANY
checkpoint["loops"]["fit_loop"]["epoch_loop.state_dict"]["dataloader_state_dict"] = ANY
assert state_dict == checkpoint["loops"]["fit_loop"]
trainer.fit_loop.load_state_dict(checkpoint["loops"]["fit_loop"])
# test resetting manually, we expect all `ready` counters to be reset to `completed`
trainer.fit_loop.reset()
trainer.fit_loop.epoch_loop.reset()
trainer.fit_loop.epoch_loop.batch_loop.reset()
trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.reset()
epoch_progress = trainer.fit_loop.epoch_progress
assert epoch_progress.current.ready == stop_epoch
assert epoch_progress.current.completed == stop_epoch
batch_progress = trainer.fit_loop.epoch_loop.batch_progress
assert batch_progress.current.ready == be_batches_completed
assert batch_progress.current.completed == be_batches_completed
optim_progress = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.optim_progress
assert optim_progress.optimizer.step.current.ready == be_total_opt_steps
assert optim_progress.optimizer.step.current.completed == be_total_opt_steps
assert optim_progress.optimizer.zero_grad.current.ready == be_total_zero_grad
assert optim_progress.optimizer.zero_grad.current.completed == be_total_zero_grad
state_dict = trainer.fit_loop.state_dict()
assert state_dict != checkpoint["loops"]["fit_loop"]
assert state_dict["epoch_progress"]["total"]["started"] == stop_epoch + 1
assert state_dict["epoch_progress"]["current"]["started"] == stop_epoch
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize("n_optimizers", (1, 3, 5))
def test_loop_state_on_complete_run(n_optimizers, tmpdir):
n_epochs = 3
n_batches = 3
accumulate_grad_batches = 1
class TestModel(BoringModel):
def __init__(self):
super().__init__()
if n_optimizers > 1:
self.configure_optimizers = self.configure_optimizers_multiple
def training_step(self, batch, batch_idx, optimizer_idx=0):
return super().training_step(batch, batch_idx)
def configure_optimizers_multiple(self):
optimizers = [torch.optim.Adam(self.layer.parameters(), lr=0.1) for _ in range(n_optimizers)]
lr_scheduler_0 = torch.optim.lr_scheduler.StepLR(optimizers[0], step_size=1)
lr_scheduler_1 = torch.optim.lr_scheduler.StepLR(optimizers[1], step_size=1)
# no scheduler for optimizer_2
lr_schedulers = [lr_scheduler_0, {"scheduler": lr_scheduler_1, "interval": "step"}]
return optimizers, lr_schedulers
def train_dataloader(self):
# override to test the `is_last_batch` value
return DataLoader(RandomDataset(32, n_batches))
model = TestModel()
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=n_epochs,
limit_val_batches=0,
accumulate_grad_batches=accumulate_grad_batches,
enable_progress_bar=False,
logger=False,
)
trainer.fit(model)
assert trainer.num_training_batches == n_batches
ckpt_path = trainer.checkpoint_callback.best_model_path
assert os.path.exists(ckpt_path)
checkpoint = torch.load(ckpt_path)
n_sch_steps_total = n_epochs
n_sch_steps_current = 1
if n_optimizers > 1:
n_sch_steps_total = n_epochs + n_epochs * n_batches
n_sch_steps_current = n_batches + 1
expected = {
"state_dict": ANY,
"epoch_progress": {
"total": {
"ready": n_epochs,
"started": n_epochs,
"processed": n_epochs,
"completed": n_epochs - 1,
},
"current": {
"ready": n_epochs,
"started": n_epochs,
"processed": n_epochs,
"completed": n_epochs - 1,
},
},
"epoch_loop.state_dict": ANY,
"epoch_loop.batch_progress": {
"total": {
"ready": n_epochs * n_batches,
"started": n_epochs * n_batches,
"processed": n_epochs * n_batches,
"completed": n_epochs * n_batches,
},
"current": {
"ready": n_batches,
"started": n_batches,
"processed": n_batches,
"completed": n_batches,
},
"is_last_batch": True,
},
"epoch_loop.scheduler_progress": {
"total": {"ready": n_sch_steps_total, "completed": n_sch_steps_total},
"current": {"ready": n_sch_steps_current, "completed": n_sch_steps_current},
},
"epoch_loop.batch_loop.state_dict": ANY,
"epoch_loop.batch_loop.manual_loop.state_dict": ANY,
"epoch_loop.batch_loop.manual_loop.optim_step_progress": {
"total": {"ready": 0, "completed": 0},
"current": {"ready": 0, "completed": 0},
},
"epoch_loop.batch_loop.optimizer_loop.state_dict": {},
"epoch_loop.batch_loop.optimizer_loop.optim_progress": {
"optimizer_position": n_optimizers,
"optimizer": {
"step": {
"total": {
"ready": n_epochs * n_batches * n_optimizers,
"completed": n_epochs * n_batches * n_optimizers,
},
"current": {
"ready": n_batches * n_optimizers,
"completed": n_batches * n_optimizers,
},
},
"zero_grad": {
"total": {
"ready": n_epochs * n_batches * n_optimizers,
"started": n_epochs * n_batches * n_optimizers,
"completed": n_epochs * n_batches * n_optimizers,
},
"current": {
"ready": n_batches * n_optimizers,
"started": n_batches * n_optimizers,
"completed": n_batches * n_optimizers,
},
},
},
},
"epoch_loop.val_loop.state_dict": ANY,
"epoch_loop.val_loop.dataloader_progress": ANY,
"epoch_loop.val_loop.epoch_loop.state_dict": ANY,
"epoch_loop.val_loop.epoch_loop.batch_progress": ANY,
"epoch_loop.val_loop._results": ANY,
"epoch_loop._results": ANY,
}
assert checkpoint["loops"]["fit_loop"] == expected
def test_fit_loop_reset(tmpdir):
"""Test that the reset logic in fit- and epoch loop is aware of whether the loop is restarting from a completed
loop or from a mid-epoch checkpoint."""
# generate checkpoints at end of epoch and mid-epoch
model = BoringModel()
checkpoint_callback = ModelCheckpoint(
dirpath=tmpdir,
every_n_train_steps=2,
save_top_k=-1,
)
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=4,
max_epochs=2,
callbacks=[checkpoint_callback],
logger=False,
enable_model_summary=False,
)
trainer.fit(model)
# reset state loaded from a checkpoint from mid-epoch
mid_epoch_ckpt = torch.load(str(tmpdir / "epoch=0-step=2.ckpt"))
fit_loop = trainer.fit_loop
epoch_loop = fit_loop.epoch_loop
optimizer_loop = epoch_loop.batch_loop.optimizer_loop
assert not fit_loop.restarting
assert not epoch_loop.restarting
assert not optimizer_loop.restarting
# we load exactly what was saved - no reset yet
fit_loop.load_state_dict(mid_epoch_ckpt["loops"]["fit_loop"])
    # resetting from a mid-epoch checkpoint SHOULD NOT reset the current counters to 0
fit_loop.reset()
epoch_loop.reset()
optimizer_loop.reset()
assert fit_loop.restarting
assert fit_loop.epoch_progress.total.ready == 1
assert fit_loop.epoch_progress.total.completed == 0 # the checkpoint was saved mid epoch
assert fit_loop.epoch_progress.current.ready == 0
assert fit_loop.epoch_progress.current.completed == 0
assert epoch_loop.restarting
assert epoch_loop.batch_progress.total.ready == 2
assert epoch_loop.batch_progress.total.processed == 2
assert epoch_loop.batch_progress.total.completed == 1 # the checkpoint was saved on train_batch_end
assert epoch_loop.batch_progress.current.ready == 1 # currents get set to the completed value
assert epoch_loop.batch_progress.current.processed == 1
assert epoch_loop.batch_progress.current.completed == 1
assert optimizer_loop.restarting
assert optimizer_loop.optim_progress.optimizer_position == 1
# reset state loaded from a checkpoint from the end of an epoch
end_of_epoch_ckpt = torch.load(str(tmpdir / "epoch=0-step=4.ckpt"))
fit_loop = trainer.fit_loop
epoch_loop = fit_loop.epoch_loop
fit_loop.restarting = False
epoch_loop.restarting = False
optimizer_loop.restarting = False
# we load exactly what was saved - no reset yet
fit_loop.load_state_dict(end_of_epoch_ckpt["loops"]["fit_loop"])
    # resetting from an end-of-epoch checkpoint SHOULD reset the current counters to 0
fit_loop.reset()
epoch_loop.reset()
optimizer_loop.reset()
assert fit_loop.restarting
assert fit_loop.epoch_progress.total.ready == 1
assert fit_loop.epoch_progress.total.completed == 0 # the checkpoint saves before the epoch completes
assert fit_loop.epoch_progress.current.ready == 0
assert fit_loop.epoch_progress.current.completed == 0
assert epoch_loop.restarting
assert epoch_loop.batch_progress.total.ready == 4
assert epoch_loop.batch_progress.total.processed == 4
assert epoch_loop.batch_progress.total.completed == 3 # the checkpoint was saved on train_batch_end
assert epoch_loop.batch_progress.current.ready == 3 # currents get set to the completed value
assert epoch_loop.batch_progress.current.processed == 3
assert epoch_loop.batch_progress.current.completed == 3
assert optimizer_loop.optim_progress.optimizer_position == 1
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize(
["train_datasets", "val_datasets"],
[([RandomDataset], [RandomDataset]), ([RandomDataset], [RandomDataset, RandomDataset])],
)
@pytest.mark.parametrize("val_check_interval", [0.5, 1.0])
def test_fit_can_fail_during_validation(train_datasets, val_datasets, val_check_interval, tmpdir):
size, n_batches = 2, 4
stop_batch = 1
n_val_dataloaders = len(val_datasets)
stop_dataloader = n_val_dataloaders - 1
class TestModel(LightningModule):
def __init__(self, should_fail):
super().__init__()
self.layer = torch.nn.Linear(size, 2)
self.should_fail = should_fail
def step(self, batch):
return sum(self.layer(b).sum() for b in batch)
def training_step(self, batch, batch_idx):
return self.step(batch)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
if self.should_fail and dataloader_idx == stop_dataloader and batch_idx == stop_batch:
raise CustomException
return self.step(batch)
def configure_optimizers(self):
return torch.optim.SGD(self.layer.parameters(), lr=0.1)
def train_dataloader(self):
return [DataLoader(cls(size, n_batches)) for cls in train_datasets]
def val_dataloader(self):
return [DataLoader(cls(size, n_batches)) for cls in val_datasets]
model = TestModel(False)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_check_interval=val_check_interval,
num_sanity_val_steps=0,
enable_progress_bar=False,
)
trainer.fit(model)
ckpt_path = os.path.join(tmpdir, ".pl_auto_save.ckpt")
assert not os.path.exists(ckpt_path), "Shouldn't have failed"
state_dict = trainer.fit_loop.state_dict()
expected_global_step = trainer.global_step
assert state_dict["epoch_loop.batch_progress"] == {
"total": {"ready": n_batches, "started": n_batches, "processed": n_batches, "completed": n_batches},
"current": {"ready": n_batches, "started": n_batches, "processed": n_batches, "completed": n_batches},
"is_last_batch": True,
}
val_per_epoch = int(1 // val_check_interval)
assert state_dict["epoch_loop.val_loop.dataloader_progress"] == {
"total": {"ready": n_val_dataloaders * val_per_epoch, "completed": n_val_dataloaders * val_per_epoch},
"current": {"ready": n_val_dataloaders, "completed": n_val_dataloaders},
}
assert state_dict["epoch_loop.val_loop.epoch_loop.batch_progress"] == {
"total": {
"ready": n_val_dataloaders * val_per_epoch * n_batches,
"started": n_val_dataloaders * val_per_epoch * n_batches,
"processed": n_val_dataloaders * val_per_epoch * n_batches,
"completed": n_val_dataloaders * val_per_epoch * n_batches,
},
"current": {"ready": n_batches, "completed": n_batches, "started": n_batches, "processed": n_batches},
"is_last_batch": True,
}
model = TestModel(True)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_check_interval=val_check_interval,
num_sanity_val_steps=0,
enable_progress_bar=False,
)
with pytest.raises(CustomException):
# will stop during validation
trainer.fit(model)
assert os.path.exists(ckpt_path)
checkpoint = torch.load(ckpt_path)["loops"]["fit_loop"]
per_val_train_batches = int(n_batches * val_check_interval)
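    # e.g. with n_batches=4 and val_check_interval=0.5 (one of the parametrisations above),
    # validation is first triggered after int(4 * 0.5) == 2 training batches, which is where
    # the failure and the auto-save happen, hence per_val_train_batches == 2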
assert checkpoint["epoch_loop.batch_progress"] == {
"total": {
"ready": per_val_train_batches,
"started": per_val_train_batches,
"processed": per_val_train_batches,
"completed": per_val_train_batches,
},
"current": {
"ready": per_val_train_batches,
"started": per_val_train_batches,
"processed": per_val_train_batches,
"completed": per_val_train_batches,
},
"is_last_batch": val_check_interval == 1,
}
val_batch_progress = "epoch_loop.val_loop.epoch_loop.batch_progress"
# "nb_": non-breaking
nb_total_val_batch = stop_dataloader * n_batches
assert checkpoint[val_batch_progress] == {
"total": {
"ready": nb_total_val_batch + stop_batch + 1,
"started": nb_total_val_batch + stop_batch + 1,
"processed": nb_total_val_batch + stop_batch,
"completed": nb_total_val_batch + stop_batch,
},
"current": {
"ready": stop_batch + 1,
"started": stop_batch + 1,
"processed": stop_batch,
"completed": stop_batch,
},
"is_last_batch": False,
}
model = TestModel(False)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
val_check_interval=val_check_interval,
enable_progress_bar=False,
)
trainer.fit(model, ckpt_path=ckpt_path)
assert trainer.global_step == expected_global_step
state_dict_after_restart = trainer.fit_loop.state_dict()
# should get the same values as in the run that did not fail
# totals are increased by 1 (the failed batch which never completed)
expected = state_dict.copy()
assert state_dict_after_restart["epoch_loop.batch_progress"] == expected["epoch_loop.batch_progress"]
val_dl_progress = "epoch_loop.val_loop.dataloader_progress"
expected[val_dl_progress]["total"]["ready"] += 1
assert state_dict_after_restart[val_dl_progress] == expected[val_dl_progress]
expected[val_batch_progress]["total"]["ready"] += 1
expected[val_batch_progress]["total"]["started"] += 1
assert state_dict_after_restart[val_batch_progress] == expected[val_batch_progress]
@pytest.mark.parametrize("should_fail", [False, True])
@pytest.mark.parametrize("persistent_workers", [pytest.param(False, marks=RunIf(slow=True)), True])
def test_workers_are_shutdown(tmpdir, should_fail, persistent_workers):
# `num_workers == 1` uses `_MultiProcessingDataLoaderIter`
# `persistent_workers` makes sure `self._iterator` gets set on the `DataLoader` instance
class _TestMultiProcessingDataLoaderIter(_MultiProcessingDataLoaderIter):
def __init__(self, *args, dataloader, **kwargs):
super().__init__(*args, **kwargs)
self.dataloader = dataloader
def _shutdown_workers(self):
self.dataloader.count_shutdown_workers += 1
super()._shutdown_workers()
class TestDataLoader(DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.count_shutdown_workers = 0
def _get_iterator(self):
if self.num_workers == 0:
return super()._get_iterator()
else:
self.check_worker_number_rationality()
return _TestMultiProcessingDataLoaderIter(self, dataloader=self)
train_dataloader = TestDataLoader(RandomDataset(32, 64), num_workers=1, persistent_workers=persistent_workers)
val_dataloader = TestDataLoader(RandomDataset(32, 64), num_workers=1, persistent_workers=persistent_workers)
class TestCallback(Callback):
def on_train_epoch_end(self, trainer, *_):
if trainer.current_epoch == 1:
raise CustomException
max_epochs = 3
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=max_epochs,
callbacks=TestCallback() if should_fail else None,
)
if should_fail:
with pytest.raises(CustomException):
trainer.fit(model, train_dataloader, val_dataloader)
else:
trainer.fit(model, train_dataloader, val_dataloader)
    # parenthesise the expected value so the comparison itself is asserted, rather than the
    # assert short-circuiting on the ternary in the `else` branch
    assert train_dataloader.count_shutdown_workers == (2 if should_fail else (2 if persistent_workers else max_epochs))
    # at the end of sanity checking, the workers are shut down too.
    assert val_dataloader.count_shutdown_workers == (2 if persistent_workers else (3 if should_fail else max_epochs + 1))
assert train_dataloader._iterator is None
assert val_dataloader._iterator is None
| [
"torch.nn.Linear",
"torch.optim.lr_scheduler.StepLR",
"torch.load"
] | 1.8 | neptune-ml/pytorch-lightning | 3bcaed52454f3e6c3bce5513032e34302e5b1bb6 |
1.8 | import torch
from torch import nn
from typing import List
from .base import ResnetBase
class Segmenter(ResnetBase):
"""A ResNet34 U-Net model, as described in
https://github.com/fastai/fastai/blob/master/courses/dl2/carvana-unet-lrg.ipynb
Attributes:
imagenet_base: boolean, default: False
Whether or not to load weights pretrained on imagenet
"""
def __init__(self, imagenet_base: bool = False) -> None:
super().__init__(imagenet_base=imagenet_base)
self.target_modules = [str(x) for x in [2, 4, 5, 6]]
self.hooks = self.add_hooks()
self.relu = nn.ReLU()
self.upsamples = nn.ModuleList([
UpBlock(2048, 1024, 512),
UpBlock(512, 512, 256),
UpBlock(256, 256, 64),
UpBlock(64, 64, 32),
UpBlock(32, 3, 16),
])
self.conv_transpose = nn.ConvTranspose2d(16, 1, 1)
self.sigmoid = nn.Sigmoid()
def add_hooks(self) -> List[torch.utils.hooks.RemovableHandle]:
hooks = []
for name, child in self.pretrained.named_children():
if name in self.target_modules:
hooks.append(child.register_forward_hook(self.save_output))
return hooks
def retrieve_hooked_outputs(self) -> List[torch.Tensor]:
# to be called in the forward pass, this method returns the tensors
# which were saved by the forward hooks
outputs = []
for name, child in self.pretrained.named_children():
if name in self.target_modules:
outputs.append(child.output)
return outputs
def cleanup(self) -> None:
# removes the hooks, and the tensors which were added
for name, child in self.pretrained.named_children():
if name in self.target_modules:
# allows the method to be safely called even if
# the hooks aren't there
try:
del child.output
except AttributeError:
continue
for hook in self.hooks:
hook.remove()
@staticmethod
def save_output(module, input, output):
# the hook to add to the target modules
module.output = output
def load_base(self, state_dict: dict) -> None:
# This allows a model trained on the classifier to be loaded
# into the model used for segmentation, even though their state_dicts
# differ
self.load_state_dict(state_dict, strict=False)
def forward(self, x):
org_input = x
x = self.relu(self.pretrained(x))
# we reverse the outputs so that the smallest output
# is the first one we get, and the largest the last
interim = self.retrieve_hooked_outputs()[::-1]
for upsampler, interim_output in zip(self.upsamples[:-1], interim):
x = upsampler(x, interim_output)
x = self.upsamples[-1](x, org_input)
return self.sigmoid(self.conv_transpose(x))
class UpBlock(nn.Module):
def __init__(self, in_channels: int, across_channels: int, out_channels: int) -> None:
super().__init__()
up_out = across_out = out_channels // 2
self.conv_across = nn.Conv2d(across_channels, across_out, 1)
# alternative: ConvTranspose2d(in_channels, up_out, 2, stride=2)
self.upsample = nn.Sequential(nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True),
nn.Conv2d(in_channels, up_out, kernel_size=1))
self.batchnorm = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
def forward(self, x_up, x_across):
upsampled = self.upsample(x_up)
skipped = self.conv_across(x_across)
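        # concatenate the upsampled features with the skip connection along the channel dimension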
joint = torch.cat((upsampled, skipped), dim=1)
return self.batchnorm(self.relu(joint))
| [
"torch.cat",
"torch.nn.Sigmoid",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d"
] | 1.8.1 | fedesigno/solar-panel-segmentation | 75856be3361bb4904387e6abc986627d1cc98ebb |
1.6 | # MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from openspeech.search.beam_search_base import OpenspeechBeamSearchBase
from openspeech.decoders import RNNTransducerDecoder
class BeamSearchRNNTransducer(OpenspeechBeamSearchBase):
r"""
RNN Transducer Beam Search
Reference: RNN-T FOR LATENCY CONTROLLED ASR WITH IMPROVED BEAM SEARCH (https://arxiv.org/pdf/1911.01629.pdf)
Args: joint, decoder, beam_size, expand_beam, state_beam, blank_id
        joint: module that joins `encoder_outputs` and `decoder_outputs`
decoder (TransformerTransducerDecoder): base decoder of transformer transducer model.
beam_size (int): size of beam.
expand_beam (int): The threshold coefficient to limit the number of expanded hypotheses.
state_beam (int): The threshold coefficient to decide if hyps in A (process_hyps)
            are likely to compete with hyps in B (ongoing_beams)
blank_id (int): blank id
Inputs: encoder_output, max_length
        encoder_output (torch.FloatTensor): An output sequence of the encoders. `FloatTensor` of size
``(seq_length, dimension)``
max_length (int): max decoding time step
Returns:
* predictions (torch.LongTensor): model predictions.
"""
def __init__(
self,
joint,
decoder: RNNTransducerDecoder,
beam_size: int = 3,
expand_beam: float = 2.3,
state_beam: float = 4.6,
blank_id: int = 3,
) -> None:
super(BeamSearchRNNTransducer, self).__init__(decoder, beam_size)
self.joint = joint
self.expand_beam = expand_beam
self.state_beam = state_beam
self.blank_id = blank_id
def forward(self, encoder_outputs: torch.Tensor, max_length: int):
r"""
Beam search decoding.
Inputs: encoder_output, max_length
            encoder_outputs (torch.FloatTensor): An output sequence of the encoders. `FloatTensor` of size
``(batch, seq_length, dimension)``
max_length (int): max decoding time step
Returns:
* predictions (torch.LongTensor): model predictions.
"""
hypothesis = list()
hypothesis_score = list()
for batch_idx in range(encoder_outputs.size(0)):
blank = (
torch.ones((1, 1), device=encoder_outputs.device, dtype=torch.long) * self.blank_id
)
step_input = (
torch.ones((1, 1), device=encoder_outputs.device, dtype=torch.long) * self.sos_id
)
hyp = {
"prediction": [self.sos_id],
"logp_score": 0.0,
"hidden_states": None,
}
ongoing_beams = [hyp]
for t_step in range(max_length):
process_hyps = ongoing_beams
ongoing_beams = list()
while True:
if len(ongoing_beams) >= self.beam_size:
break
a_best_hyp = max(process_hyps, key=lambda x: x["logp_score"] / len(x["prediction"]))
if len(ongoing_beams) > 0:
b_best_hyp = max(
ongoing_beams,
key=lambda x: x["logp_score"] / len(x["prediction"]),
)
a_best_prob = a_best_hyp["logp_score"]
b_best_prob = b_best_hyp["logp_score"]
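                        # state-beam pruning: stop expanding once the best ongoing hypothesis
                        # leads the best candidate by more than state_beam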
if b_best_prob >= self.state_beam + a_best_prob:
break
process_hyps.remove(a_best_hyp)
step_input[0, 0] = a_best_hyp["prediction"][-1]
step_outputs, hidden_states = self.decoder(step_input, a_best_hyp["hidden_states"])
log_probs = self.joint(encoder_outputs[batch_idx, t_step, :], step_outputs.view(-1))
topk_targets, topk_idx = log_probs.topk(k=self.beam_size)
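                    # best_logp is the best non-blank log-probability among the top-k,
                    # used for expand-beam pruning below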
if topk_idx[0] != blank:
best_logp = topk_targets[0]
else:
best_logp = topk_targets[1]
for j in range(topk_targets.size(0)):
topk_hyp = {
"prediction": a_best_hyp["prediction"][:],
"logp_score": a_best_hyp["logp_score"] + topk_targets[j],
"hidden_states": a_best_hyp["hidden_states"],
}
if topk_idx[j] == self.blank_id:
ongoing_beams.append(topk_hyp)
continue
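                        # expand-beam pruning: only expand tokens scoring within expand_beam of the best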
if topk_targets[j] >= best_logp - self.expand_beam:
topk_hyp["prediction"].append(topk_idx[j].item())
topk_hyp["hidden_states"] = hidden_states
process_hyps.append(topk_hyp)
ongoing_beams = sorted(
ongoing_beams,
key=lambda x: x["logp_score"] / len(x["prediction"]),
reverse=True,
)[0]
hypothesis.append(torch.LongTensor(ongoing_beams["prediction"][1:]))
hypothesis_score.append(ongoing_beams["logp_score"] / len(ongoing_beams["prediction"]))
return self._fill_sequence(hypothesis) | [
"torch.LongTensor",
"torch.ones"
] | 1.6.0 | techthiyanes/openspeech | 10307587f08615224df5a868fb5249c68c70b12d |
1.8 | import pytest
import numpy as np
import torch
from openunmix import transforms
@pytest.fixture(params=[4096, 44100])
def nb_timesteps(request):
return int(request.param)
@pytest.fixture(params=[1, 2])
def nb_channels(request):
return request.param
@pytest.fixture(params=[1, 2])
def nb_samples(request):
return request.param
@pytest.fixture(params=[1024, 2048, 4096])
def nfft(request):
return int(request.param)
@pytest.fixture(params=[2, 4])
def hop(request, nfft):
return nfft // request.param
@pytest.fixture(params=["torch", "asteroid"])
def method(request):
return request.param
@pytest.fixture
def audio(request, nb_samples, nb_channels, nb_timesteps):
return torch.rand((nb_samples, nb_channels, nb_timesteps))
def test_stft(audio, nfft, hop, method):
# we should only test for center=True as
# False doesn't pass COLA
# https://github.com/pytorch/audio/issues/500
stft, istft = transforms.make_filterbanks(n_fft=nfft, n_hop=hop, center=True, method=method)
X = stft(audio)
X = X.detach()
out = istft(X, length=audio.shape[-1])
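    # the STFT -> iSTFT round trip should reconstruct the input almost exactly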
assert np.sqrt(np.mean((audio.detach().numpy() - out.detach().numpy()) ** 2)) < 1e-6
| [
"torch.rand"
] | 1.8.0 | ParhamYZ/MusicSourceSeparation | 26a42fbebdf50d2ae2ef674ef64f4c88cbe7e8e3 |
1.6 | import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2seq_encoders.gated_cnn_encoder import GatedCnnEncoder
class TestGatedCnnEncoder(AllenNlpTestCase):
def test_gated_cnn_encoder(self):
cnn_encoder = GatedCnnEncoder(
input_dim=32,
layers=[[[4, 32]], [[1, 16], [5, 16], [1, 32]], [[1, 64], [5, 64], [1, 32]]],
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = cnn_encoder(token_embeddings, mask)
assert list(output.size()) == [5, 10, 64]
def test_gated_cnn_encoder_dilations(self):
cnn_encoder = GatedCnnEncoder(
input_dim=32, layers=[[[2, 32, 1]], [[2, 32, 2]], [[2, 32, 4]], [[2, 32, 8]]]
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = cnn_encoder(token_embeddings, mask)
assert list(output.size()) == [5, 10, 64]
def test_gated_cnn_encoder_layers(self):
cnn_encoder = GatedCnnEncoder(
input_dim=32,
layers=[[[4, 32]], [[1, 16], [5, 16], [1, 32]], [[1, 64], [5, 64], [1, 32]]],
return_all_layers=True,
)
token_embeddings = torch.rand(5, 10, 32)
mask = torch.ones(5, 10).bool()
mask[0, 7:] = False
mask[1, 5:] = False
output = cnn_encoder(token_embeddings, mask)
assert len(output) == 3
concat_layers = torch.cat([layer.unsqueeze(1) for layer in output], dim=1)
assert list(concat_layers.size()) == [5, 3, 10, 64]
| [
"torch.rand",
"torch.ones"
] | 1.6.0 | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 |
1.6 | import pytest
from numpy.testing import assert_almost_equal
import torch
from torch.nn import LSTM
from torch.nn.utils.rnn import pack_padded_sequence
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper
from allennlp.nn.util import sort_batch_by_length, get_lengths_from_binary_sequence_mask
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
class TestPytorchSeq2VecWrapper(AllenNlpTestCase):
def test_get_dimensions_is_correct(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
assert encoder.get_output_dim() == 14
assert encoder.get_input_dim() == 2
lstm = LSTM(
bidirectional=False, num_layers=3, input_size=2, hidden_size=7, batch_first=True
)
encoder = PytorchSeq2VecWrapper(lstm)
assert encoder.get_output_dim() == 7
assert encoder.get_input_dim() == 2
def test_forward_pulls_out_correct_tensor_without_sequence_lengths(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=2, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
input_tensor = torch.FloatTensor([[[0.7, 0.8], [0.1, 1.5]]])
lstm_output = lstm(input_tensor)
encoder_output = encoder(input_tensor, None)
assert_almost_equal(encoder_output.data.numpy(), lstm_output[0].data.numpy()[:, -1, :])
def test_forward_pulls_out_correct_tensor_with_sequence_lengths(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
input_tensor[1, 6:, :] = 0
input_tensor[2, 4:, :] = 0
input_tensor[3, 2:, :] = 0
input_tensor[4, 1:, :] = 0
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, 4:] = False
mask[3, 2:] = False
mask[4, 1:] = False
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
packed_sequence = pack_padded_sequence(
input_tensor, sequence_lengths.tolist(), batch_first=True
)
_, state = lstm(packed_sequence)
# Transpose output state, extract the last forward and backward states and
# reshape to be of dimension (batch_size, 2 * hidden_size).
reshaped_state = state[0].transpose(0, 1)[:, -2:, :].contiguous()
explicitly_concatenated_state = torch.cat(
[reshaped_state[:, 0, :].squeeze(1), reshaped_state[:, 1, :].squeeze(1)], -1
)
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(encoder_output.data.numpy(), explicitly_concatenated_state.data.numpy())
def test_forward_works_even_with_empty_sequences(self):
lstm = LSTM(
bidirectional=True, num_layers=3, input_size=3, hidden_size=11, batch_first=True
)
encoder = PytorchSeq2VecWrapper(lstm)
tensor = torch.rand([5, 7, 3])
tensor[1, 6:, :] = 0
tensor[2, :, :] = 0
tensor[3, 2:, :] = 0
tensor[4, :, :] = 0
mask = torch.ones(5, 7).bool()
mask[1, 6:] = False
mask[2, :] = False
mask[3, 2:] = False
mask[4, :] = False
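        # rows 2 and 4 are fully masked, so their encodings should come out as all zeros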
results = encoder(tensor, mask)
for i in (0, 1, 3):
assert not (results[i] == 0.0).data.all()
for i in (2, 4):
assert (results[i] == 0.0).data.all()
def test_forward_pulls_out_correct_tensor_with_unsorted_batches(self):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7, batch_first=True)
encoder = PytorchSeq2VecWrapper(lstm)
input_tensor = torch.rand([5, 7, 3])
input_tensor[0, 3:, :] = 0
input_tensor[1, 4:, :] = 0
input_tensor[2, 2:, :] = 0
input_tensor[3, 6:, :] = 0
mask = torch.ones(5, 7).bool()
mask[0, 3:] = False
mask[1, 4:] = False
mask[2, 2:] = False
mask[3, 6:] = False
sequence_lengths = get_lengths_from_binary_sequence_mask(mask)
sorted_inputs, sorted_sequence_lengths, restoration_indices, _ = sort_batch_by_length(
input_tensor, sequence_lengths
)
packed_sequence = pack_padded_sequence(
sorted_inputs, sorted_sequence_lengths.tolist(), batch_first=True
)
_, state = lstm(packed_sequence)
# Transpose output state, extract the last forward and backward states and
# reshape to be of dimension (batch_size, 2 * hidden_size).
sorted_transposed_state = state[0].transpose(0, 1).index_select(0, restoration_indices)
reshaped_state = sorted_transposed_state[:, -2:, :].contiguous()
explicitly_concatenated_state = torch.cat(
[reshaped_state[:, 0, :].squeeze(1), reshaped_state[:, 1, :].squeeze(1)], -1
)
encoder_output = encoder(input_tensor, mask)
assert_almost_equal(encoder_output.data.numpy(), explicitly_concatenated_state.data.numpy())
def test_wrapper_raises_if_batch_first_is_false(self):
with pytest.raises(ConfigurationError):
lstm = LSTM(bidirectional=True, num_layers=3, input_size=3, hidden_size=7)
_ = PytorchSeq2VecWrapper(lstm)
def test_wrapper_works_with_alternating_lstm(self):
model = PytorchSeq2VecWrapper(
StackedAlternatingLstm(input_size=4, hidden_size=5, num_layers=3)
)
input_tensor = torch.randn(2, 3, 4)
mask = torch.ones(2, 3).bool()
output = model(input_tensor, mask)
assert tuple(output.size()) == (2, 5)
| [
"torch.rand",
"torch.nn.LSTM",
"torch.FloatTensor",
"torch.ones",
"torch.randn"
] | 1.6.0 | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 |
1.6 | """
A maxout neural network.
"""
from typing import Sequence, Union
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.common.registrable import FromParams
class Maxout(torch.nn.Module, FromParams):
"""
This `Module` is a maxout neural network.
# Parameters
input_dim : `int`, required
The dimensionality of the input. We assume the input has shape `(batch_size, input_dim)`.
num_layers : `int`, required
The number of maxout layers to apply to the input.
output_dims : `Union[int, Sequence[int]]`, required
The output dimension of each of the maxout layers. If this is a single `int`, we use
it for all maxout layers. If it is a `Sequence[int]`, `len(output_dims)` must be
`num_layers`.
pool_sizes : `Union[int, Sequence[int]]`, required
The size of max-pools. If this is a single `int`, we use
it for all maxout layers. If it is a `Sequence[int]`, `len(pool_sizes)` must be
`num_layers`.
dropout : `Union[float, Sequence[float]]`, optional (default = `0.0`)
If given, we will apply this amount of dropout after each layer. Semantics of `float`
versus `Sequence[float]` is the same as with other parameters.
"""
def __init__(
self,
input_dim: int,
num_layers: int,
output_dims: Union[int, Sequence[int]],
pool_sizes: Union[int, Sequence[int]],
dropout: Union[float, Sequence[float]] = 0.0,
) -> None:
super().__init__()
if not isinstance(output_dims, list):
output_dims = [output_dims] * num_layers # type: ignore
if not isinstance(pool_sizes, list):
pool_sizes = [pool_sizes] * num_layers # type: ignore
if not isinstance(dropout, list):
dropout = [dropout] * num_layers # type: ignore
if len(output_dims) != num_layers:
raise ConfigurationError(
"len(output_dims) (%d) != num_layers (%d)" % (len(output_dims), num_layers)
)
if len(pool_sizes) != num_layers:
raise ConfigurationError(
"len(pool_sizes) (%d) != num_layers (%d)" % (len(pool_sizes), num_layers)
)
if len(dropout) != num_layers:
raise ConfigurationError(
"len(dropout) (%d) != num_layers (%d)" % (len(dropout), num_layers)
)
self._pool_sizes = pool_sizes
input_dims = [input_dim] + output_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim, pool_size in zip(
input_dims, output_dims, pool_sizes
):
linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim * pool_size))
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dims = output_dims
self._output_dim = output_dims[-1]
self._input_dim = input_dim
def get_output_dim(self):
return self._output_dim
def get_input_dim(self):
return self._input_dim
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
output = inputs
for layer, layer_output_dim, dropout, pool_size in zip(
self._linear_layers, self._output_dims, self._dropout, self._pool_sizes
):
affine_output = layer(output)
# Compute and apply the proper shape for the max.
shape = list(inputs.size())
shape[-1] = layer_output_dim
shape.append(pool_size)
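            # maxout activation: take the max over the pool_size pieces in the last dimension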
maxed_output = torch.max(affine_output.view(*shape), dim=-1)[0]
dropped_output = dropout(maxed_output)
output = dropped_output
return output
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.ModuleList"
] | 1.6.0 | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 |
1.6 | import copy
import pytest
import torch
from torch.testing import assert_allclose
from transformers import AutoModel
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.bert.modeling_bert import BertEmbeddings
from transformers.models.albert.configuration_albert import AlbertConfig
from transformers.models.albert.modeling_albert import AlbertEmbeddings
from allennlp.common import Params, FromParams
from allennlp.modules.transformer import (
TransformerEmbeddings,
ImageFeatureEmbeddings,
TransformerModule,
)
PARAMS_DICT = {
"vocab_size": 20,
"embedding_size": 5,
"pad_token_id": 0,
"max_position_embeddings": 3,
"type_vocab_size": 2,
"dropout": 0.5,
}
@pytest.fixture
def params_dict():
return copy.deepcopy(PARAMS_DICT)
@pytest.fixture
def params(params_dict):
return Params(params_dict)
@pytest.fixture
def transformer_embeddings(params):
return TransformerEmbeddings.from_params(params.duplicate())
def test_can_construct_from_params(params_dict, transformer_embeddings):
embeddings = transformer_embeddings.embeddings
assert embeddings.word_embeddings.num_embeddings == params_dict["vocab_size"]
assert embeddings.word_embeddings.embedding_dim == params_dict["embedding_size"]
assert embeddings.word_embeddings.padding_idx == params_dict["pad_token_id"]
assert embeddings.position_embeddings.num_embeddings == params_dict["max_position_embeddings"]
assert embeddings.position_embeddings.embedding_dim == params_dict["embedding_size"]
assert embeddings.token_type_embeddings.num_embeddings == params_dict["type_vocab_size"]
assert embeddings.token_type_embeddings.embedding_dim == params_dict["embedding_size"]
assert transformer_embeddings.layer_norm.normalized_shape[0] == params_dict["embedding_size"]
assert transformer_embeddings.dropout.p == params_dict["dropout"]
def test_sanity():
class TextEmbeddings(TransformerModule, FromParams):
def __init__(
self,
vocab_size: int,
hidden_size: int,
pad_token_id: int,
max_position_embeddings: int,
type_vocab_size: int,
dropout: float,
):
super().__init__()
self.word_embeddings = torch.nn.Embedding(
vocab_size, hidden_size, padding_idx=pad_token_id
)
self.position_embeddings = torch.nn.Embedding(max_position_embeddings, hidden_size)
self.token_type_embeddings = torch.nn.Embedding(type_vocab_size, hidden_size)
self.layer_norm = torch.nn.LayerNorm(hidden_size, eps=1e-12)
self.dropout = torch.nn.Dropout(dropout)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
torch.manual_seed(23)
text = TextEmbeddings(10, 5, 2, 3, 7, 0.0)
torch.manual_seed(23)
transformer = TransformerEmbeddings(10, 5, 2, 3, None, 7, 0.0)
input_ids = torch.tensor([[1, 2]])
token_type_ids = torch.tensor([[1, 0]], dtype=torch.long)
position_ids = torch.tensor([[0, 1]])
text_output = text(input_ids, token_type_ids, position_ids)
transformer_output = transformer(input_ids, token_type_ids, position_ids)
assert_allclose(text_output, transformer_output)
def test_forward_runs_with_inputs(transformer_embeddings):
input_ids = torch.tensor([[1, 2]])
token_type_ids = torch.tensor([[1, 0]], dtype=torch.long)
position_ids = torch.tensor([[0, 1]])
transformer_embeddings(
input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids
)
def test_output_size(params):
input_ids = torch.tensor([[1, 2]])
token_type_ids = torch.tensor([[1, 0]], dtype=torch.long)
position_ids = torch.tensor([[0, 1]])
params["output_size"] = 7
module = TransformerEmbeddings.from_params(params)
output = module(input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
assert output.shape[-1] == 7
def test_no_token_type_layer(params):
params["type_vocab_size"] = 0
module = TransformerEmbeddings.from_params(params)
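    # only word and position embeddings remain when type_vocab_size is 0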
assert len(module.embeddings) == 2
@pytest.mark.parametrize(
"pretrained_name",
[
"bert-base-cased",
"epwalsh/bert-xsmall-dummy",
],
)
def test_loading_from_pretrained_module(pretrained_name):
TransformerEmbeddings.from_pretrained_module(pretrained_name)
def test_loading_albert():
"""
Albert is a special case because it includes a Linear layer in the encoder
that maps the embeddings to the encoder hidden size, but we include this linear
layer within our embedding layer.
"""
transformer_embedding = TransformerEmbeddings.from_pretrained_module(
"albert-base-v2",
)
albert = AutoModel.from_pretrained("albert-base-v2")
assert_allclose(
transformer_embedding.embeddings.word_embeddings.weight.data,
albert.embeddings.word_embeddings.weight.data,
)
assert_allclose(
transformer_embedding.linear_transform.weight.data,
albert.encoder.embedding_hidden_mapping_in.weight.data,
)
def get_modules():
params = copy.deepcopy(PARAMS_DICT)
params["hidden_dropout_prob"] = params.pop("dropout")
params["hidden_size"] = params.pop("embedding_size")
# bert, roberta, electra self attentions have the same code.
torch.manual_seed(1234)
yield "bert", BertEmbeddings(BertConfig(**params))
albertparams = copy.deepcopy(PARAMS_DICT)
albertparams["hidden_dropout_prob"] = albertparams.pop("dropout")
torch.manual_seed(1234)
yield "albert", AlbertEmbeddings(AlbertConfig(**albertparams))
@pytest.mark.parametrize("module_name, hf_module", get_modules())
def test_forward_against_huggingface_output(transformer_embeddings, module_name, hf_module):
input_ids = torch.tensor([[1, 2]])
token_type_ids = torch.tensor([[1, 0]], dtype=torch.long)
position_ids = torch.tensor([[0, 1]])
state_dict = transformer_embeddings._get_mapped_state_dict(hf_module.state_dict())
if "position_ids" in state_dict:
del state_dict["position_ids"]
transformer_embeddings.load_state_dict(state_dict)
torch.manual_seed(1234)
transformer_embeddings = (
transformer_embeddings.eval()
) # setting to eval mode to avoid non-deterministic dropout.
output = transformer_embeddings(
input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids
)
torch.manual_seed(1234)
hf_module = hf_module.eval() # setting to eval mode to avoid non-deterministic dropout.
hf_output = hf_module(
input_ids=input_ids, token_type_ids=token_type_ids, position_ids=position_ids
)
assert torch.allclose(output, hf_output)
@pytest.fixture
def image_params_dict():
return {"feature_size": 3, "embedding_size": 5, "dropout": 0.1}
@pytest.fixture
def image_params(image_params_dict):
return Params(image_params_dict)
@pytest.fixture
def image_embeddings(image_params):
return ImageFeatureEmbeddings.from_params(image_params.duplicate())
def test_can_construct_image_embeddings_from_params(image_embeddings, image_params_dict):
assert (
image_embeddings.embeddings.image_embeddings.in_features
== image_params_dict["feature_size"]
)
assert (
image_embeddings.embeddings.image_embeddings.out_features
== image_params_dict["embedding_size"]
)
assert (
image_embeddings.embeddings.location_embeddings.out_features
== image_params_dict["embedding_size"]
)
assert image_embeddings.dropout.p == image_params_dict["dropout"]
def test_image_embedding_forward_runs_with_inputs(image_embeddings, image_params_dict):
batch_size = 2
feature_dim = image_params_dict["feature_size"]
image_feature = torch.randn(batch_size, feature_dim)
image_location = torch.randn(batch_size, 4)
image_embeddings(image_feature, image_location)
def test_image_embeddings_sanity(image_params_dict):
class OldImageFeatureEmbeddings(TransformerModule, FromParams):
"""Construct the embeddings from image, spatial location (omit now) and
token_type embeddings.
"""
def __init__(self, feature_size: int, embedding_size: int, dropout: float = 0.0):
super().__init__()
self.image_embeddings = torch.nn.Linear(feature_size, embedding_size)
self.image_location_embeddings = torch.nn.Linear(4, embedding_size, bias=False)
self.layer_norm = torch.nn.LayerNorm(embedding_size, eps=1e-12)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, image_feature: torch.Tensor, image_location: torch.Tensor):
img_embeddings = self.image_embeddings(image_feature)
loc_embeddings = self.image_location_embeddings(image_location)
embeddings = self.layer_norm(img_embeddings + loc_embeddings)
embeddings = self.dropout(embeddings)
return embeddings
torch.manual_seed(23)
old = OldImageFeatureEmbeddings(**image_params_dict)
torch.manual_seed(23)
now = ImageFeatureEmbeddings(**image_params_dict)
batch_size = 2
image_feature = torch.randn(batch_size, image_params_dict["feature_size"])
image_location = torch.randn(batch_size, 4)
torch.manual_seed(23)
old_output = old(image_feature, image_location)
torch.manual_seed(23)
now_output = now(image_feature, image_location)
assert_allclose(old_output, now_output)
| [
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.zeros",
"torch.arange",
"torch.manual_seed",
"torch.tensor",
"torch.testing.assert_allclose",
"torch.allclose",
"torch.randn"
] | 1.6.0 | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 |
0.4 | # coding: utf-8
from __future__ import with_statement, print_function, absolute_import
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import librosa
import pysptk
from wavenet_vocoder.mixture import discretized_mix_logistic_loss
from wavenet_vocoder.mixture import sample_from_discretized_mix_logistic
def log_prob_from_logits(x):
""" numerically stable log_softmax implementation that prevents overflow """
# TF ordering
axis = len(x.size()) - 1
m, _ = torch.max(x, dim=-1, keepdim=True)
return x - m - torch.log(torch.sum(torch.exp(x - m), dim=axis, keepdim=True))
def test_log_softmax():
x = torch.rand(2, 16000, 30)
y = log_prob_from_logits(x)
y_hat = F.log_softmax(x, -1)
y = y.data.cpu().numpy()
y_hat = y_hat.data.cpu().numpy()
assert np.allclose(y, y_hat)
def test_mixture():
np.random.seed(1234)
x, sr = librosa.load(pysptk.util.example_audio_file(), sr=None)
assert sr == 16000
T = len(x)
x = x.reshape(1, T, 1)
y = torch.from_numpy(x).float()
y_hat = torch.rand(1, 30, T).float()
print(y.shape, y_hat.shape)
loss = discretized_mix_logistic_loss(y_hat, y)
print(loss)
loss = discretized_mix_logistic_loss(y_hat, y, reduce=False)
print(loss.size(), y.size())
assert loss.size() == y.size()
y = sample_from_discretized_mix_logistic(y_hat)
print(y.shape)
def test_misc():
# https://en.wikipedia.org/wiki/Logistic_distribution
# what i have learned
# m = (x - mu) / s
m = torch.rand(10, 10)
log_pdf_mid1 = -2 * torch.log(torch.exp(m / 2) + torch.exp(-m / 2))
log_pdf_mid2 = m - 2 * F.softplus(m)
assert np.allclose(log_pdf_mid1.data.numpy(), log_pdf_mid2.data.numpy())
# Edge case for 0
plus_in = torch.rand(10, 10)
log_cdf_plus1 = torch.sigmoid(m).log()
log_cdf_plus2 = m - F.softplus(m)
assert np.allclose(log_cdf_plus1.data.numpy(), log_cdf_plus2.data.numpy())
# Edge case for 255
min_in = torch.rand(10, 10)
log_one_minus_cdf_min1 = (1 - torch.sigmoid(min_in)).log()
log_one_minus_cdf_min2 = -F.softplus(min_in)
assert np.allclose(log_one_minus_cdf_min1.data.numpy(), log_one_minus_cdf_min2.data.numpy())
| [
"torch.rand",
"torch.sigmoid",
"torch.nn.functional.softplus",
"torch.max",
"torch.nn.functional.log_softmax",
"torch.from_numpy",
"torch.exp"
] | 0.4.1 | botmatic/tacotron2 | c2dee4930f6bd1cf707e0565fd0675b8646a51a1 |
1.3 | """ Translation main class """
from __future__ import unicode_literals, print_function
import torch
from onmt.inputters.text_dataset import TextMultiField
class TranslationBuilder(object):
"""
Build a word-based translation from the batch output
of translator and the underlying dictionaries.
Replacement based on "Addressing the Rare Word
Problem in Neural Machine Translation" :cite:`Luong2015b`
Args:
data (onmt.inputters.Dataset): Data.
fields (List[Tuple[str, torchtext.data.Field]]): data fields
n_best (int): number of translations produced
replace_unk (bool): replace unknown words using attention
has_tgt (bool): will the batch have gold targets
"""
def __init__(self, data, fields, n_best=1, replace_unk=False,
has_tgt=False, phrase_table=""):
self.data = data
self.fields = fields
self._has_text_src = isinstance(
dict(self.fields)["src"], TextMultiField)
self.n_best = n_best
self.replace_unk = replace_unk
self.phrase_table = phrase_table
self.has_tgt = has_tgt
def _build_target_tokens(self, src, src_vocab, src_raw, pred, attn):
tgt_field = dict(self.fields)["tgt"].base_field
eos_idx = tgt_field.vocab.stoi[tgt_field.eos_token]
vocab = tgt_field.vocab
tokens = []
for tok in pred:
tokens.append(str(tok.item()))
            if tok.item() == eos_idx:  # tokens holds string ids, so compare the raw id
tokens = tokens[:-1]
break
if self.replace_unk and attn is not None and src is not None:
for i in range(len(tokens)):
if tokens[i] == tgt_field.unk_token:
_, max_index = attn[i][:len(src_raw)].max(0)
tokens[i] = src_raw[max_index.item()]
if self.phrase_table != "":
with open(self.phrase_table, "r") as f:
for line in f:
if line.startswith(src_raw[max_index.item()]):
tokens[i] = line.split('|||')[1].strip()
return tokens
def from_batch(self, translation_batch):
batch = translation_batch["batch"]
assert(len(translation_batch["gold_score"]) ==
len(translation_batch["predictions"]))
batch_size = batch.batch_size
preds, pred_score, attn, gold_score, indices = list(zip(
*sorted(zip(translation_batch["predictions"],
translation_batch["scores"],
translation_batch["attention"],
translation_batch["gold_score"],
batch.indices.data),
key=lambda x: x[-1])))
# Sorting
inds, perm = torch.sort(batch.indices)
if self._has_text_src:
src = batch.src[0][:, :, 0].index_select(1, perm)
else:
src = None
tgt = batch.tgt[:, :, 0].index_select(1, perm) \
if self.has_tgt else None
translations = []
for b in range(batch_size):
if self._has_text_src:
src_vocab = self.data.src_vocabs[inds[b]] \
if self.data.src_vocabs else None
src_raw = self.data.examples[inds[b]].src[0]
else:
src_vocab = None
src_raw = None
pred_sents = [self._build_target_tokens(
src[:, b] if src is not None else None,
src_vocab, src_raw,
preds[b][n], attn[b][n])
for n in range(self.n_best)]
gold_sent = None
if tgt is not None:
gold_sent = self._build_target_tokens(
src[:, b] if src is not None else None,
src_vocab, src_raw,
tgt[1:, b] if tgt is not None else None, None)
translation = Translation(
src[:, b] if src is not None else None,
src_raw, pred_sents, attn[b], pred_score[b],
gold_sent, gold_score[b]
)
translations.append(translation)
return translations
class Translation(object):
"""Container for a translated sentence.
Attributes:
src (LongTensor): Source word IDs.
src_raw (List[str]): Raw source words.
pred_sents (List[List[str]]): Words from the n-best translations.
pred_scores (List[List[float]]): Log-probs of n-best translations.
attns (List[FloatTensor]) : Attention distribution for each
translation.
gold_sent (List[str]): Words from gold translation.
gold_score (List[float]): Log-prob of gold translation.
"""
__slots__ = ["src", "src_raw", "pred_sents", "attns", "pred_scores",
"gold_sent", "gold_score"]
def __init__(self, src, src_raw, pred_sents,
attn, pred_scores, tgt_sent, gold_score):
self.src = src
self.src_raw = src_raw
self.pred_sents = pred_sents
self.attns = attn
self.pred_scores = pred_scores
self.gold_sent = tgt_sent
self.gold_score = gold_score
def log(self, sent_number):
"""
Log translation.
"""
msg = ['\nSENT {}: {}\n'.format(sent_number, self.src_raw)]
best_pred = self.pred_sents[0]
best_score = self.pred_scores[0]
pred_sent = ' '.join(best_pred)
msg.append('PRED {}: {}\n'.format(sent_number, pred_sent))
msg.append("PRED SCORE: {:.4f}\n".format(best_score))
if self.gold_sent is not None:
tgt_sent = ' '.join(self.gold_sent)
msg.append('GOLD {}: {}\n'.format(sent_number, tgt_sent))
msg.append(("GOLD SCORE: {:.4f}\n".format(self.gold_score)))
if len(self.pred_sents) > 1:
msg.append('\nBEST HYP:\n')
for score, sent in zip(self.pred_scores, self.pred_sents):
msg.append("[{:.4f}] {}\n".format(score, sent))
return "".join(msg)
| [
"torch.sort"
] | 1.3.1 | KaijuML/data2text-macro-plan-py | 17cebc5db507723d601d21a075adea59b0bd9ffb |
1.9 | import os
import dataclasses
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import OneCycleLR
from pymarlin.core import module_interface, data_interface
from transformers import AutoModelForTokenClassification
from pymarlin.utils.stats import global_stats
from pymarlin.utils.logger.logging_utils import getlogger
from .sequence_labelling_metrics import get_ner_seq_metric
from pymarlin.utils.distributed import rank_zero_only
from pymarlin.plugins.hf_ner.data_classes import NERDataInterface
from pymarlin.plugins import PluginModuleInterface
logger = getlogger(__name__, "DEBUG")
@dataclasses.dataclass
class ModelArguments:
model_name: "bert"
encoder_key: "bert"
hf_model: "bert-base-uncased"
model_file: "pytorch_model.bin"
model_config_file: "config.json"
model_path: None
model_config_path: None
tokenizer_path: None
@dataclasses.dataclass
class ModuleInterfaceArguments:
output_dir: None
max_lr: 0.00004 # Maximum learning rate.
warmup_prop: 0.1 # % of steps
has_labels: True
max_seq_len: 128
pad_label_id: -100
label_all_tokens: False
model_args: ModelArguments = ModelArguments
class NERModule(PluginModuleInterface):
"""NER Task specific ModuleInterface used with a trainer.
The `data` and `model` are required properties and must be set.
Args:
        ModuleInterfaceArguments : contains module interface arguments, i.e. max learning rate,
        warmup proportion, type of trainer, etc. Also includes the ModelArguments class as an attribute,
        which holds model-specific arguments such as the HF model name, model path, model file name, etc.
"""
def __init__(self, args, data: NERDataInterface):
super().__init__()
self.args = args
self.metric_func = get_ner_seq_metric
self.data = data
self.auto_setup(AutoModelForTokenClassification)
def get_train_dataloader(self, sampler: torch.utils.data.Sampler, batch_size: int):
train_ds = self.data.get_train_dataset()
logger.info(f"Training samples = {len(train_ds)}")
dl = DataLoader(
train_ds,
batch_size=batch_size,
collate_fn=self.collate_func,
sampler=sampler(train_ds),
)
return dl
def get_val_dataloaders(self, sampler: torch.utils.data.Sampler, batch_size: int):
val_ds = self.data.get_val_dataset()
logger.info(f"Validation samples = {len(val_ds)}")
dl = DataLoader(
val_ds,
batch_size=batch_size,
collate_fn=self.collate_func,
sampler=sampler(val_ds),
)
return dl
def collate_func(self,batch):
sentence, labels = zip(*batch)
sentence, labels = list(sentence), list(labels)
tokenized_inputs = self.tokenizer(
sentence,
padding="max_length",
return_token_type_ids=True,
return_tensors="pt",
truncation=True,
is_split_into_words=True,
max_length=self.args.max_seq_len,
)
label_ids = []
for i in range(len(sentence)): # for each sentence in input
if self.args.has_labels:
current_label_ids = []
current_label = labels[i]
word_ids = tokenized_inputs.word_ids(i)
prev_word_idx = None # To track subwords
for word_idx in word_ids:
if word_idx is None: # special tokens have None
current_label_ids.append(self.args.pad_label_id)
elif (word_idx != prev_word_idx): # First part of a word always gets the label
current_label_ids.append(self.data.label_map[current_label[word_idx]])
else: # other subword tokens get the same label or ignore index, controlled by flag label_all_tokens
current_label_ids.append(
self.data.label_map[current_label[word_idx]]
if self.args.label_all_tokens
else self.args.pad_label_id
)
prev_word_idx = word_idx
label_ids.append(current_label_ids)
tokenized_inputs['labels'] = torch.tensor(label_ids)
return tokenized_inputs
def get_optimizers_schedulers(
self, estimated_global_steps_per_epoch: int, epochs: int
):
self.optimizer = Adam(self.model.parameters(), self.args.max_lr)
self.schedulers = OneCycleLR(
self.optimizer,
max_lr=self.args.max_lr,
steps_per_epoch=estimated_global_steps_per_epoch,
epochs=epochs,
anneal_strategy="linear",
pct_start=self.args.warmup_prop,
div_factor=1e7, # initial lr ~0
final_div_factor=1e10, # final lr ~0
)
return [self.optimizer], [self.schedulers]
def _inputs_to_device(self, batch, device):
inputs = {}
for k, v in batch.items():
if v is not None:
inputs[k] = v.to(device)
return inputs
def train_step(self, global_step, batch, device):
batch = self._inputs_to_device(batch, device)
outputs = self.model.forward(**batch)
loss = outputs.loss
return loss
def val_step(self, global_step, batch, device):
batch = self._inputs_to_device(batch, device)
outputs = self.model.forward(**batch)
if self.args.has_labels:
return outputs.loss, outputs.logits, batch["labels"]
else:
return outputs.logits
def on_end_train_step(self, global_step, train_loss):
global_stats.update("lr", self.optimizer.param_groups[0]["lr"], frequent=True)
@rank_zero_only
def on_end_val_epoch(self, global_step, *inputs, key="default"):
if self.args.has_labels and len(inputs) > 0:
loss, logits, labels = inputs
logits = logits.cpu().numpy()
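            # flatten (batch, seq_len) so predictions and labels can be compared token by token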
logits = logits.reshape(-1, logits.shape[-1])
predictions = np.argmax(logits, axis=1)
label_ids = labels.to("cpu").numpy().reshape(-1)
str_preds = [
self.data.get_labels()[int(p)]
for (p, l) in zip(predictions, label_ids)
if l != self.args.pad_label_id
]
str_labels = [
self.data.get_labels()[int(l)]
for (p, l) in zip(predictions, label_ids)
if l != self.args.pad_label_id
]
metrics = self.metric_func(str_labels, str_preds)
for k in metrics:
global_stats.update(k, metrics[k])
else:
logger.info(
"Either validation data was not provided OR no labels were provided to compute metrics."
)
| [
"torch.optim.lr_scheduler.OneCycleLR",
"torch.tensor"
] | 1.9.1 | nifarn/PyMarlin | ea1f5f927aa85112ecebc206d53b5c3ee65704fa |
0.4 |
"""
Created on Tue Jun 23 20:15:11 2020
@author: sarroutim2
"""
"""Genearates a representation for an image input.
"""
import torch.nn as nn
import torch
import torchvision.models as models
class EncoderCNN(nn.Module):
"""Generates a representation for an image input.
"""
def __init__(self, output_size):
"""Load the pretrained ResNet-152 and replace top fc layer.
"""
super(EncoderCNN, self).__init__()
self.cnn = models.resnet50(pretrained=True)#resnet18
for param in self.cnn.parameters():
param.requires_grad = False
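        # freeze the pretrained backbone; the new fc layer below stays trainable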
self.cnn.fc = nn.Linear(self.cnn.fc.in_features, output_size)
self.bn = nn.BatchNorm1d(output_size, momentum=0.01)
self.init_weights()
"""
super(EncoderCNN, self).__init__()
self.cnn = models.googlenet(pretrained=True)#resnet18
for param in self.cnn.parameters():
param.requires_grad = False
num_features = self.cnn.classifier[6].in_features
features = list(self.cnn.classifier.children())[:-1]
features.extend([nn.Linear(num_features, 512)])
self.cnn.classifier=nn.Sequential(*features)
#self.cnn.fc=nn.Sequential(*features)
self.cnn.fc = nn.Linear(512, output_size)
#self.cnn.classifier = nn.Sequential(*features)
self.bn = nn.BatchNorm1d(output_size, momentum=0.01)
self.init_weights()"""
def init_weights(self):
"""Initialize the weights.
"""
self.cnn.fc.weight.data.normal_(0.0, 0.02)
self.cnn.fc.bias.data.fill_(0)
def forward(self, images):
"""Extract the image feature vectors.
"""
features = self.cnn(images)
output = self.bn(features)
return output
| [
"torch.nn.Linear",
"torch.nn.BatchNorm1d"
] | 0.4.0 | sarrouti/VQG | eb9cbe3ba4f75d85fc55f5f1e746b1f2190f0b2b |
1.4 | """
SEResNet implementation from Cadene's pretrained models
https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py
Additional credit to https://github.com/creafz
Original model: https://github.com/hujie-frank/SENet
ResNet code gently borrowed from
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate
support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here.
"""
import math
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import create_classifier
from .registry import register_model
__all__ = ['SENet']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'layer0.conv1', 'classifier': 'last_linear',
**kwargs
}
default_cfgs = {
'legacy_senet154':
_cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth'),
'legacy_seresnet18': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth',
interpolation='bicubic'),
'legacy_seresnet34': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'),
'legacy_seresnet50': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'),
'legacy_seresnet101': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'),
'legacy_seresnet152': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'),
'legacy_seresnext26_32x4d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth',
interpolation='bicubic'),
'legacy_seresnext50_32x4d':
_cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'),
'legacy_seresnext101_32x4d':
_cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth'),
}
def _weight_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.)
nn.init.constant_(m.bias, 0.)
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = x.mean((2, 3), keepdim=True)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class Bottleneck(nn.Module):
"""
Base class for bottlenecks that implements `forward()` method.
"""
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.se_module(out) + residual
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
"""
Bottleneck for SENet154.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes * 2)
self.conv2 = nn.Conv2d(
planes * 2, planes * 4, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes * 4)
self.conv3 = nn.Conv2d(
planes * 4, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBottleneck(Bottleneck):
"""
ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
implementation and uses `stride=stride` in `conv1` and not in `conv2`
(the latter is used in the torchvision implementation of ResNet).
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEResNetBottleneck, self).__init__()
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=1, bias=False, stride=stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNeXtBottleneck(Bottleneck):
"""
ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None, base_width=4):
super(SEResNeXtBottleneck, self).__init__()
width = math.floor(planes * (base_width / 64)) * groups
self.conv1 = nn.Conv2d(
inplanes, width, kernel_size=1, bias=False, stride=1)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(
width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
super(SEResNetBlock, self).__init__()
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes, reduction=reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.se_module(out) + residual
out = self.relu(out)
return out
class SENet(nn.Module):
def __init__(self, block, layers, groups, reduction, drop_rate=0.2,
in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1,
downsample_padding=0, num_classes=1000, global_pool='avg'):
"""
Parameters
----------
block (nn.Module): Bottleneck class.
- For SENet154: SEBottleneck
- For SE-ResNet models: SEResNetBottleneck
- For SE-ResNeXt models: SEResNeXtBottleneck
layers (list of ints): Number of residual blocks for 4 layers of the
network (layer1...layer4).
groups (int): Number of groups for the 3x3 convolution in each
bottleneck block.
- For SENet154: 64
- For SE-ResNet models: 1
- For SE-ResNeXt models: 32
reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
- For all models: 16
dropout_p (float or None): Drop probability for the Dropout layer.
If `None` the Dropout layer is not used.
- For SENet154: 0.2
- For SE-ResNet models: None
- For SE-ResNeXt models: None
inplanes (int): Number of input channels for layer1.
- For SENet154: 128
- For SE-ResNet models: 64
- For SE-ResNeXt models: 64
input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
a single 7x7 convolution in layer0.
- For SENet154: True
- For SE-ResNet models: False
- For SE-ResNeXt models: False
downsample_kernel_size (int): Kernel size for downsampling convolutions
in layer2, layer3 and layer4.
- For SENet154: 3
- For SE-ResNet models: 1
- For SE-ResNeXt models: 1
downsample_padding (int): Padding for downsampling convolutions in
layer2, layer3 and layer4.
- For SENet154: 1
- For SE-ResNet models: 0
- For SE-ResNeXt models: 0
num_classes (int): Number of outputs in `last_linear` layer.
- For all models: 1000
"""
super(SENet, self).__init__()
self.inplanes = inplanes
self.num_classes = num_classes
self.drop_rate = drop_rate
if input_3x3:
layer0_modules = [
('conv1', nn.Conv2d(in_chans, 64, 3, stride=1, padding=1, bias=False)),
('bn1', nn.BatchNorm2d(64)),
('relu1', nn.ReLU(inplace=True)),
('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=0, bias=False)),
('bn2', nn.BatchNorm2d(64)),
('relu2', nn.ReLU(inplace=True)),
('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=0, bias=False)),
('bn3', nn.BatchNorm2d(inplanes)),
('relu3', nn.ReLU(inplace=True)),
]
else:
layer0_modules = [
('conv1', nn.Conv2d(
in_chans, inplanes, kernel_size=7, stride=(2, 1), padding=(5, 3), bias=False)),
('bn1', nn.BatchNorm2d(inplanes)),
('relu1', nn.ReLU(inplace=True)),
]
self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
# To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`.
self.pool0 = nn.MaxPool2d(3, stride=(2, 1), ceil_mode=True, padding=1)
self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')]
self.layer1 = self._make_layer(
block,
planes=64,
blocks=layers[0],
groups=groups,
reduction=reduction,
downsample_kernel_size=1,
downsample_padding=0,
stride=(2, 1)
)
self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')]
self.layer2 = self._make_layer(
block,
planes=128,
blocks=layers[1],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')]
self.layer3 = self._make_layer(
block,
planes=256,
blocks=layers[2],
stride=(2, 1),
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')]
self.layer4 = self._make_layer(
block,
planes=512,
blocks=layers[3],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')]
self.num_features = 512 * block.expansion
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
for m in self.modules():
_weight_init(m)
        # extra 1x1 conv head producing 256-channel features
        self.cnn_ = nn.Sequential(OrderedDict([
            ('conv_', nn.Conv2d(2048, 256, 1, stride=1, bias=False)),
            ('bn_', nn.BatchNorm2d(256)),
            ('relu_', nn.ReLU(inplace=True)),
        ]))
def _make_layer(self, block, planes, blocks, groups, reduction, stride=2,
downsample_kernel_size=1, downsample_padding=0):
downsample = None
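        # use a projection shortcut when the spatial resolution or channel count changes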
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size,
stride=stride, padding=downsample_padding, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, groups, reduction))
return nn.Sequential(*layers)
def get_classifier(self):
return self.last_linear
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.layer0(x)
x = self.pool0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.cnn_(x)
return x
def logits(self, x):
x = self.global_pool(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = self.last_linear(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.logits(x)
return x
def _create_senet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
SENet, variant, default_cfg=default_cfgs[variant], pretrained=pretrained, **kwargs)
@register_model
def legacy_seresnet18(pretrained=False, **kwargs):
model_args = dict(
block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet18', pretrained, **model_args)
@register_model
def legacy_seresnet34(pretrained=False, **kwargs):
model_args = dict(
block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet34', pretrained, **model_args)
@register_model
def legacy_seresnet50(pretrained=False, **kwargs):
model_args = dict(
block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet50', pretrained, **model_args)
@register_model
def legacy_seresnet101(pretrained=False, **kwargs):
model_args = dict(
block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet101', pretrained, **model_args)
@register_model
def legacy_seresnet152(pretrained=False, **kwargs):
model_args = dict(
block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet152', pretrained, **model_args)
@register_model
def legacy_senet154(pretrained=False, **kwargs):
model_args = dict(
block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16,
downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs)
return _create_senet('legacy_senet154', pretrained, **model_args)
@register_model
def legacy_seresnext26_32x4d(pretrained=False, **kwargs):
model_args = dict(
block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs)
return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args)
@register_model
def legacy_seresnext50_32x4d(pretrained=False, **kwargs):
model_args = dict(
block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs)
return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args)
@register_model
def legacy_seresnext101_32x4d(pretrained=False, **kwargs):
model_args = dict(
block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs)
return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args)
| [
"torch.nn.Sigmoid",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.constant_",
"torch.nn.functional.dropout",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.4.0 | lzmisscc/pytorch-image-models | a32aa96d109292bfef00a631c501bd6c2bd44fdf |
1.0 | import numpy as np
from skimage.transform import resize
import skimage
import torchvision.utils as tvutils
import torch
# PIL and scipy.ndimage are needed by resize_image below (PIL image inputs and ndimage zoom)
import PIL
import PIL.Image
import PIL.PngImagePlugin
from scipy.ndimage import zoom
def rescale_for_display( batch, rescale=True, normalize=False ):
'''
Prepares network output for display by optionally rescaling from [-1,1],
and by setting some pixels to the min/max of 0/1. This prevents matplotlib
from rescaling the images.
'''
if rescale:
display_batch = [ rescale_image( im.copy(), new_scale=[0, 1], current_scale=[-1, 1] )
for im in batch ]
else:
display_batch = batch.copy()
if not normalize:
for im in display_batch:
im[0,0,0] = 1.0 # Adjust some values so that matplotlib doesn't rescale
im[0,1,0] = 0.0 # Now adjust the min
return display_batch
def rescale_image(im, new_scale=[-1.,1.], current_scale=None, no_clip=False):
"""
Rescales an image's pixel values to ``new_scale``.
Args:
im: A np.float32 array, assumed to be in [0, 1] unless ``current_scale`` is given
new_scale: [min,max]
current_scale: If not supplied, it is assumed to be in:
[0, 1]: if dtype=float
[0, 2^16]: if dtype=uint
[0, 255]: if dtype=ubyte
Returns:
rescaled_image
"""
# im = im.astype(np.float32)
if current_scale is not None:
min_val, max_val = current_scale
if not no_clip:
im = np.clip(im, min_val, max_val)
im = im - min_val
im /= (max_val - min_val)
min_val, max_val = new_scale
im *= (max_val - min_val)
im += min_val
im = skimage.img_as_float(im)
return im
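# A worked example of the mapping above (values are illustrative, not from the
# original file): with current_scale=[0, 255] and new_scale=[-1, 1], pixel values
# 0, 127.5 and 255 map to -1, 0 and +1 respectively:
#   rescale_image(np.array([0., 127.5, 255.]), new_scale=[-1., 1.], current_scale=[0., 255.])
#   -> array([-1., 0., 1.])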
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
By kchen @ https://github.com/kchen92/joint-representation/blob/24b30ca6963d2ec99618af379c1e05e1f7026710/lib/data/input_pipeline_feed_dict.py
"""
if type(im) == PIL.PngImagePlugin.PngImageFile:
interps = [PIL.Image.NEAREST, PIL.Image.BILINEAR]
return skimage.util.img_as_float(im.resize(new_dims, interps[interp_order]))
if all( new_dims[i] == im.shape[i] for i in range( len( new_dims ) ) ):
resized_im = im #return im.astype(np.float32)
elif im.shape[-1] == 1 or im.shape[-1] == 3:
# # skimage is fast but only understands {1,3} channel images
resized_im = resize(im, new_dims, order=interp_order, preserve_range=True)
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
# resized_im = resized_im.astype(np.float32)
return resized_im
def resize_rescale_image(img, new_dims, new_scale, interp_order=1, current_scale=None, no_clip=False):
"""
Resize an image array with interpolation, and rescale its values to lie
within ``new_scale``.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
img = skimage.img_as_float( img )
img = resize_image( img, new_dims, interp_order )
img = rescale_image( img, new_scale, current_scale=current_scale, no_clip=no_clip )
return img
def pack_images(x, prediction, label, mask=None):
uncertainty = None
if isinstance(prediction, tuple):
prediction, uncertainty = prediction
if len(label.shape) == 4 and label.shape[1] == 2:
zeros = torch.zeros(label.shape[0], 1, label.shape[2], label.shape[3]).to(label.device)
label = torch.cat([label, zeros], dim=1)
prediction = torch.cat([prediction, zeros], dim=1)
if uncertainty is not None:
uncertainty = torch.cat([uncertainty, zeros], dim=1)
if mask is not None:
mask = torch.cat([mask, mask[:,0].unsqueeze(1)], dim=1)
if len(x.shape) == 4 and x.shape[1] == 2:
zeros = torch.zeros(x.shape[0], 1, x.shape[2], x.shape[3]).to(x.device)
x = torch.cat([x, zeros], dim=1)
to_cat = []
if x.shape[1] <= 3:
to_cat.append(x)
shape_with_three_channels = list(x.shape)
shape_with_three_channels[1] = 3
to_cat.append(prediction.expand(shape_with_three_channels))
if uncertainty is not None:
print(uncertainty.min(), uncertainty.max())
uncertainty = 2*uncertainty - 1.0
uncertainty = uncertainty.clamp(min=-1.0, max=1.0)
to_cat.append(uncertainty.expand(shape_with_three_channels))
to_cat.append(label.expand(shape_with_three_channels))
if mask is not None:
to_cat.append(mask.expand(shape_with_three_channels))
# print([p.shape for p in to_cat])
im_samples = torch.cat(to_cat, dim=3)
im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)
return im_samples
def maybe_entriple(x, is_mask=False):
if x.shape[1] == 2:
if is_mask:
x = torch.cat([x, x[:,0].unsqueeze(1)], dim=1)
else:
zeros = torch.zeros(x.shape[0], 1, x.shape[2], x.shape[3]).to(x.device)
x = torch.cat([x, zeros], dim=1)
shape_with_three_channels = list(x.shape)
shape_with_three_channels[1] = 3
return x.expand(shape_with_three_channels)
def pack_chained_images(x, predictions, labels, mask=None):
x = maybe_entriple(x)
if mask is not None:
mask = maybe_entriple(mask, is_mask=True)
tripled_predictions, uncertainties = [], []
for p in predictions:
if isinstance(p, tuple):
p, u = p
uncertainties.append(maybe_entriple(u))
else:
uncertainties.append(None)
tripled_predictions.append(maybe_entriple(p))
predictions = tripled_predictions
labels = [maybe_entriple(l) for l in labels]
to_cat = []
if x.shape[1] <= 3:
to_cat.append(x)
for pred, uncert, label in zip(predictions, uncertainties, labels):
to_cat.append(label)
to_cat.append(pred)
if uncert is not None:
print(uncert.min(), uncert.max())
uncert = 2*uncert - 1.0
uncert = uncert.clamp(min=-1.0, max=1.0)
to_cat.append(uncert)
if mask is not None:
to_cat.append(mask)
# print([p.shape for p in to_cat])
im_samples = torch.cat(to_cat, dim=3)
im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)
return im_samples | [
"torch.zeros",
"torch.cat"
] | 1.0.1 | joel99/midlevel-reps | f0b4a4d8ccf09a0488cd18af24723172aff99446 |
1.0 | import torch
from habitat.sims.habitat_simulator import SimulatorActions
try:
from habitat.sims.habitat_simulator import SIM_NAME_TO_ACTION
except:
pass
# TODO these are action values. Make sure to add the word "action" into the name
FORWARD_VALUE = SimulatorActions.FORWARD.value
FORWARD_VALUE = FORWARD_VALUE if isinstance(FORWARD_VALUE, int) else SIM_NAME_TO_ACTION[FORWARD_VALUE]
STOP_VALUE = SimulatorActions.STOP.value
STOP_VALUE = STOP_VALUE if isinstance(STOP_VALUE, int) else SIM_NAME_TO_ACTION[STOP_VALUE]
LEFT_VALUE = SimulatorActions.LEFT.value
LEFT_VALUE = LEFT_VALUE if isinstance(LEFT_VALUE, int) else SIM_NAME_TO_ACTION[LEFT_VALUE]
RIGHT_VALUE = SimulatorActions.RIGHT.value
RIGHT_VALUE = RIGHT_VALUE if isinstance(RIGHT_VALUE, int) else SIM_NAME_TO_ACTION[RIGHT_VALUE]
TAKEOVER1 = [LEFT_VALUE] * 4 + [FORWARD_VALUE] * 4
TAKEOVER2 = [RIGHT_VALUE] * 4 + [FORWARD_VALUE] * 4
TAKEOVER3 = [LEFT_VALUE] * 6 + [FORWARD_VALUE] * 2
TAKEOVER4 = [RIGHT_VALUE] * 6 + [FORWARD_VALUE] * 2
# TAKEOVER5 = [LEFT_VALUE] * 8 # rotation only seems not to break out of bad behavior
# TAKEOVER6 = [RIGHT_VALUE] * 8
TAKEOVER_ACTION_SEQUENCES = [TAKEOVER1, TAKEOVER2, TAKEOVER3, TAKEOVER4]
TAKEOVER_ACTION_SEQUENCES = [torch.Tensor(t).long() for t in TAKEOVER_ACTION_SEQUENCES]
DEFAULT_TAKEOVER_ACTIONS = torch.Tensor([LEFT_VALUE, LEFT_VALUE, LEFT_VALUE, LEFT_VALUE, FORWARD_VALUE, FORWARD_VALUE]).long()
NON_STOP_VALUES = torch.Tensor([FORWARD_VALUE, LEFT_VALUE, RIGHT_VALUE]).long()
| [
"torch.Tensor"
] | 1.0.1 | joel99/midlevel-reps | f0b4a4d8ccf09a0488cd18af24723172aff99446 |
1.3 | from torch.utils.data import DataLoader
from torchvision import transforms as T
from torchvision.datasets import CIFAR10
import pytorch_lightning as pl
class CIFAR10Data(pl.LightningDataModule):
""" returns cifar-10 examples in floats in range [0,1] """
def __init__(self, args):
super().__init__()
self.a = args
def train_dataloader(self):
transform = T.Compose(
[
T.RandomCrop(32, padding=4, padding_mode='reflect'),
T.RandomHorizontalFlip(),
T.ToTensor(),
]
)
dataset = CIFAR10(root=self.a.data_dir, train=True, transform=transform, download=True)
dataloader = DataLoader(
dataset,
batch_size=self.a.batch_size,
num_workers=self.a.num_workers,
drop_last=True,
pin_memory=True,
shuffle=True,
)
return dataloader
def val_dataloader(self):
transform = T.Compose(
[
T.ToTensor(),
]
)
dataset = CIFAR10(root=self.a.data_dir, train=False, transform=transform, download=True)
dataloader = DataLoader(
dataset,
batch_size=self.a.batch_size,
num_workers=self.a.num_workers,
drop_last=True,
pin_memory=True,
)
return dataloader
def test_dataloader(self):
return self.val_dataloader()
| [
"torch.utils.data.DataLoader"
] | 1.3.0 | zhangbo2008/vqvae_pytorch | 98f2f2386328245ae26ac999528c7dda57680aca |
1.6 | # NASNet Search Space https://arxiv.org/pdf/1707.07012.pdf
# code modified from DARTS https://github.com/quark0/darts
import numpy as np
from collections import namedtuple
import torch
from algs.nsga_net.model.micro_models import NetworkCIFAR as Network
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
Genotype_norm = namedtuple('Genotype', 'normal normal_concat')
Genotype_redu = namedtuple('Genotype', 'reduce reduce_concat')
# what you want to search should be defined here and in micro_operations
PRIMITIVES = [
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5',
'sep_conv_7x7',
'conv_7x1_1x7',
]
def convert_cell(cell_bit_string):
# convert cell bit-string to genome
tmp = [cell_bit_string[i:i + 2] for i in range(0, len(cell_bit_string), 2)]
return [tmp[i:i + 2] for i in range(0, len(tmp), 2)]
def convert(bit_string):
# convert network bit-string (norm_cell + redu_cell) to genome
norm_gene = convert_cell(bit_string[:len(bit_string)//2])
redu_gene = convert_cell(bit_string[len(bit_string)//2:])
return [norm_gene, redu_gene]
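# A small worked example of the encoding (hypothetical bit-string, for illustration):
# an 8-integer cell string is paired into (op, input) units and grouped into blocks
# of two units each:
#   convert_cell([3, 1, 3, 0, 3, 1, 2, 0])
#   -> [[[3, 1], [3, 0]], [[3, 1], [2, 0]]]
# `convert` applies the same transformation to each half of the full bit-string
# (normal cell followed by reduction cell).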
def decode_cell(genome, norm=True):
cell, cell_concat = [], list(range(2, len(genome)+2))
for block in genome:
for unit in block:
cell.append((PRIMITIVES[unit[0]], unit[1]))
if unit[1] in cell_concat:
cell_concat.remove(unit[1])
if norm:
return Genotype_norm(normal=cell, normal_concat=cell_concat)
else:
return Genotype_redu(reduce=cell, reduce_concat=cell_concat)
def decode(genome):
# decodes genome to architecture
normal_cell = genome[0]
reduce_cell = genome[1]
normal, normal_concat = [], list(range(2, len(normal_cell)+2))
reduce, reduce_concat = [], list(range(2, len(reduce_cell)+2))
for block in normal_cell:
for unit in block:
normal.append((PRIMITIVES[int(unit[0])], int(unit[1])))
if unit[1] in normal_concat:
normal_concat.remove(unit[1])
for block in reduce_cell:
for unit in block:
reduce.append((PRIMITIVES[unit[0]], unit[1]))
if unit[1] in reduce_concat:
reduce_concat.remove(unit[1])
return Genotype(
normal=normal, normal_concat=normal_concat,
reduce=reduce, reduce_concat=reduce_concat
)
def compare_cell(cell_string1, cell_string2):
cell_genome1 = convert_cell(cell_string1)
cell_genome2 = convert_cell(cell_string2)
cell1, cell2 = cell_genome1[:], cell_genome2[:]
for block1 in cell1:
for block2 in cell2:
if block1 == block2 or block1 == block2[::-1]:
cell2.remove(block2)
break
if len(cell2) > 0:
return False
else:
return True
def compare(string1, string2):
if compare_cell(string1[:len(string1)//2],
string2[:len(string2)//2]):
if compare_cell(string1[len(string1)//2:],
string2[len(string2)//2:]):
return True
return False
def debug():
# designed to debug the encoding scheme
seed = 0
np.random.seed(seed)
budget = 2000
B, n_ops, n_cell = 5, 7, 2
networks = []
design_id = 1
while len(networks) < budget:
bit_string = []
for c in range(n_cell):
for b in range(B):
bit_string += [np.random.randint(n_ops),
np.random.randint(b + 2),
np.random.randint(n_ops),
np.random.randint(b + 2)
]
genome = convert(bit_string)
# check against evaluated networks in case of duplicates
doTrain = True
for network in networks:
if compare(genome, network):
doTrain = False
break
if doTrain:
genotype = decode(genome)
model = Network(16, 10, 8, False, genotype)
model.drop_path_prob = 0.0
data = torch.randn(1, 3, 32, 32)
output, output_aux = model(torch.autograd.Variable(data))
networks.append(genome)
design_id += 1
print(design_id)
if __name__ == "__main__":
bit_string1 = [3,1,3,0,3,1,3,0,3,1,2,0,2,0,5,2,0,0,0,1,2,2,0,1,0,0,2,2,2,2,0,1]
| [
"torch.randn",
"torch.autograd.Variable"
] | 1.6.0 | Beautyya/BenchENA | 776cd1dd035d73c4af369d0106d010b932f64782 |
0.4 | """
Adopted from AllenNLP:
https://github.com/allenai/allennlp/blob/v0.6.1/allennlp/nn/initializers.py
An initializer is just a PyTorch function.
Here we implement a proxy class that allows us
to register them and supply any additional function arguments
(for example, the ``mean`` and ``std`` of a normal initializer)
as named arguments to the constructor.
The available initialization functions are
* `"normal" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.normal_>`_
* `"uniform" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.uniform_>`_
* `"constant" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.constant_>`_
* `"eye" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.eye_>`_
* `"dirac" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.dirac_>`_
* `"xavier_uniform" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_uniform_>`_
* `"xavier_normal" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.xavier_normal_>`_
* `"kaiming_uniform" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_uniform_>`_
* `"kaiming_normal" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.kaiming_normal_>`_
* `"orthogonal" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.orthogonal_>`_
* `"sparse" <http://pytorch.org/docs/master/nn.html?highlight=orthogonal#torch.nn.init.sparse_>`_
* :func:`"block_orthogonal" <block_orthogonal>`
* :func:`"uniform_unit_scaling" <uniform_unit_scaling>`
"""
import re
import math
from typing import Callable, List, Tuple, Type, Iterable
import itertools
import torch
import torch.nn.init
from stog.utils import logging
from stog.utils.checks import ConfigurationError
logger = logging.init_logger() # pylint: disable=invalid-name
def uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = "linear"):
"""
An initialiser which preserves output variance for approximately gaussian
distributed inputs. This boils down to initialising layers using a uniform
distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where
``dim[0]`` is equal to the input dimension of the parameter and the ``scale``
is a constant scaling factor which depends on the non-linearity used.
See `Random Walk Initialisation for Training Very Deep Feedforward Networks
<https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_
for more information.
Parameters
----------
tensor : ``torch.Tensor``, required.
The tensor to initialise.
nonlinearity : ``str``, optional (default = "linear")
The non-linearity which is performed after the projection that this
tensor is involved in. This must be the name of a function contained
in the ``torch.nn.functional`` package.
Returns
-------
The initialised tensor.
"""
size = 1.
# Estimate the input size. This won't work perfectly,
# but it covers almost all use cases where this initialiser
# would be expected to be useful, i.e in large linear and
# convolutional layers, as the last dimension will almost
# always be the output size.
for dimension in list(tensor.size())[:-1]:
size *= dimension
activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)
max_value = math.sqrt(3 / size) * activation_scaling
return tensor.uniform_(-max_value, max_value)
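# A quick numeric sketch of the bound used above (illustrative shapes only): for a
# parameter of shape (256, 128) with the default "linear" non-linearity, the
# estimated input size is 256 (product of all but the last dimension), the gain is
# 1.0, and values are drawn uniformly from
#   (-sqrt(3 / 256), +sqrt(3 / 256)) ~= (-0.108, +0.108).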
def block_orthogonal(tensor: torch.Tensor,
split_sizes: List[int],
gain: float = 1.0) -> None:
"""
An initializer which allows initializing model parameters in "blocks". This is helpful
in the case of recurrent models which use multiple gates applied to linear projections,
which can be computed efficiently if they are concatenated together. However, they are
separate parameters which should be initialized independently.
Parameters
----------
tensor : ``torch.Tensor``, required.
A tensor to initialize.
split_sizes : List[int], required.
A list of length ``tensor.ndim()`` specifying the size of the
blocks along that particular dimension. E.g. ``[10, 20]`` would
result in the tensor being split into chunks of size 10 along the
first dimension and 20 along the second.
gain : float, optional (default = 1.0)
The gain (scaling) applied to the orthogonal initialization.
"""
data = tensor.data
sizes = list(tensor.size())
if any([a % b != 0 for a, b in zip(sizes, split_sizes)]):
raise ConfigurationError("tensor dimensions must be divisible by their respective "
"split_sizes. Found size: {} and split_sizes: {}".format(sizes, split_sizes))
indexes = [list(range(0, max_size, split))
for max_size, split in zip(sizes, split_sizes)]
# Iterate over all possible blocks within the tensor.
for block_start_indices in itertools.product(*indexes):
# A list of tuples containing the index to start at for this block
# and the appropriate step size (i.e split_size[i] for dimension i).
index_and_step_tuples = zip(block_start_indices, split_sizes)
# This is a tuple of slices corresponding to:
# tensor[index: index + step_size, ...]. This is
# required because we could have an arbitrary number
# of dimensions. The actual slices we need are the
# start_index: start_index + step for each dimension in the tensor.
block_slice = tuple([slice(start_index, start_index + step)
for start_index, step in index_and_step_tuples])
data[block_slice] = torch.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)
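# A minimal usage sketch (hypothetical shapes, for illustration): an LSTM-style
# projection with hidden size 5 and input size 10 can be initialised gate-by-gate:
#   weight = torch.empty(20, 10)
#   block_orthogonal(weight, split_sizes=[5, 10])
# which fills the tensor as a 4 x 1 grid of independent 5 x 10 orthogonal blocks.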
def zero(tensor: torch.Tensor) -> None:
return tensor.data.zero_()
def lstm_hidden_bias(tensor: torch.Tensor) -> None:
"""
Initialize the biases of the forget gate to 1, and all other gates to 0,
following Jozefowicz et al., An Empirical Exploration of Recurrent Network Architectures
"""
# gates are (b_hi|b_hf|b_hg|b_ho) of shape (4*hidden_size)
tensor.data.zero_()
hidden_size = tensor.shape[0] // 4
tensor.data[hidden_size:(2 * hidden_size)] = 1.0
| [
"torch.nn.init.calculate_gain"
] | 0.4.1 | sfillwo/stog | b965c47c17472eea11ab63aab9aa738af7875f06 |
1.0 | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import numpy as np
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
"""
Arguments:
x: a float tensor with shape [batch_size, c, h, w].
Returns:
a float tensor with shape [batch_size, c*h*w].
"""
# this transpose is needed for the converted pretrained weights to work
x = x.transpose(3, 2).contiguous()
return x.view(x.size(0), -1)
class PNet(nn.Module):
def __init__(self):
super(PNet, self).__init__()
# suppose we have input with size HxW, then
# after first layer: H - 2,
# after pool: ceil((H - 2)/2),
# after second conv: ceil((H - 2)/2) - 2,
# after last conv: ceil((H - 2)/2) - 4,
# and the same for W
self.features = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(3, 10, 3, 1)),
('prelu1', nn.PReLU(10)),
('pool1', nn.MaxPool2d(2, 2, ceil_mode=True)),
('conv2', nn.Conv2d(10, 16, 3, 1)),
('prelu2', nn.PReLU(16)),
('conv3', nn.Conv2d(16, 32, 3, 1)),
('prelu3', nn.PReLU(32))
]))
self.conv4_1 = nn.Conv2d(32, 2, 1, 1)
self.conv4_2 = nn.Conv2d(32, 4, 1, 1)
weights = np.load('face_module/mtcnn_pytorch/src/weights/pnet.npy')[()]
for n, p in self.named_parameters():
p.data = torch.FloatTensor(weights[n])
def forward(self, x):
"""
Arguments:
x: a float tensor with shape [batch_size, 3, h, w].
Returns:
b: a float tensor with shape [batch_size, 4, h', w'].
a: a float tensor with shape [batch_size, 2, h', w'].
"""
x = self.features(x)
a = self.conv4_1(x)
b = self.conv4_2(x)
a = F.softmax(a, dim=-1)
return b, a
class RNet(nn.Module):
def __init__(self):
super(RNet, self).__init__()
self.features = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(3, 28, 3, 1)),
('prelu1', nn.PReLU(28)),
('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)),
('conv2', nn.Conv2d(28, 48, 3, 1)),
('prelu2', nn.PReLU(48)),
('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)),
('conv3', nn.Conv2d(48, 64, 2, 1)),
('prelu3', nn.PReLU(64)),
('flatten', Flatten()),
('conv4', nn.Linear(576, 128)),
('prelu4', nn.PReLU(128))
]))
self.conv5_1 = nn.Linear(128, 2)
self.conv5_2 = nn.Linear(128, 4)
weights = np.load('face_module/mtcnn_pytorch/src/weights/rnet.npy')[()]
for n, p in self.named_parameters():
p.data = torch.FloatTensor(weights[n])
def forward(self, x):
"""
Arguments:
x: a float tensor with shape [batch_size, 3, h, w].
Returns:
b: a float tensor with shape [batch_size, 4].
a: a float tensor with shape [batch_size, 2].
"""
x = self.features(x)
a = self.conv5_1(x)
b = self.conv5_2(x)
a = F.softmax(a, dim=-1)
return b, a
class ONet(nn.Module):
def __init__(self):
super(ONet, self).__init__()
self.features = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(3, 32, 3, 1)),
('prelu1', nn.PReLU(32)),
('pool1', nn.MaxPool2d(3, 2, ceil_mode=True)),
('conv2', nn.Conv2d(32, 64, 3, 1)),
('prelu2', nn.PReLU(64)),
('pool2', nn.MaxPool2d(3, 2, ceil_mode=True)),
('conv3', nn.Conv2d(64, 64, 3, 1)),
('prelu3', nn.PReLU(64)),
('pool3', nn.MaxPool2d(2, 2, ceil_mode=True)),
('conv4', nn.Conv2d(64, 128, 2, 1)),
('prelu4', nn.PReLU(128)),
('flatten', Flatten()),
('conv5', nn.Linear(1152, 256)),
('drop5', nn.Dropout(0.25)),
('prelu5', nn.PReLU(256)),
]))
self.conv6_1 = nn.Linear(256, 2)
self.conv6_2 = nn.Linear(256, 4)
self.conv6_3 = nn.Linear(256, 10)
weights = np.load('face_module/mtcnn_pytorch/src/weights/onet.npy')[()]
for n, p in self.named_parameters():
p.data = torch.FloatTensor(weights[n])
def forward(self, x):
"""
Arguments:
x: a float tensor with shape [batch_size, 3, h, w].
Returns:
c: a float tensor with shape [batch_size, 10].
b: a float tensor with shape [batch_size, 4].
a: a float tensor with shape [batch_size, 2].
"""
x = self.features(x)
a = self.conv6_1(x)
b = self.conv6_2(x)
c = self.conv6_3(x)
a = F.softmax(a, dim = -1)
return c, b, a
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.FloatTensor",
"torch.nn.Conv2d",
"torch.nn.PReLU",
"torch.nn.functional.softmax"
] | 1.0.1 | furkanc/Yolov3-Face-Recognition | d3074490a6a7bf83925319ed521b557919d0af7e |
1.7 | # -*- coding: utf-8 -*-
# (C) Copyright 2020, 2021 IBM. All Rights Reserved.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Analog mapped layers."""
from typing import Optional, Tuple, List
from torch import Tensor, cat, split, no_grad
from torch.nn import Linear
from aihwkit.nn.functions import AnalogFunction
from aihwkit.nn.modules.base import AnalogModuleBase, RPUConfigAlias
from aihwkit.simulator.configs import SingleRPUConfig
from aihwkit.exceptions import ModuleError
class AnalogLinearMapped(AnalogModuleBase, Linear):
"""Linear layer that uses an analog tile.
Linear layer that uses an analog tile during its forward, backward
and update passes. In contrast to
:class:`~aihwkit.bb.modules.linear.Linear` the maximal in and/or
out dimension can be restricted, in which case the linear layer is
split into multiple parts and computed on multiple tiles of given
max sizes.
In contrast to :class:`~aihwkit.nn.modules.linear.AnalogLinear`, the
bias vector (if requested) is always handled in digital (floating
point).
Note:
Mapping is controlled by the :class:`aihwkit.simulator.configs.utils.MappingParameter`.
Note:
The tensor parameters of this layer (``.weight`` and ``.bias``) are not
guaranteed to contain the same values as the internal weights and biases
stored in the analog tile. Please use ``set_weights`` and
``get_weights`` when attempting to read or modify the weight/bias. This
read/write process can simulate the (noisy and inexact) analog writing
and reading of the resistive elements.
Args:
in_features: input vector size (number of columns).
out_features: output vector size (number of rows).
rpu_config: resistive processing unit configuration.
bias: whether to use a bias row on the analog tile or not
realistic_read_write: whether to enable realistic read/write
for setting initial weights and read out of weights
weight_scaling_omega: the weight value where the max
weight will be scaled to. If zero, no weight scaling will
be performed
"""
# pylint: disable=abstract-method, too-many-locals, too-many-instance-attributes
__constants__ = ['in_features', 'out_features', 'realistic_read_write', 'weight_scaling_omega',
'digital_bias', 'analog_bias', 'use_bias']
in_features: int
out_features: int
realistic_read_write: bool
weight_scaling_omega: float
digital_bias: bool
analog_bias: bool
use_bias: bool
in_sizes: List[int]
out_sizes: List[int]
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
rpu_config: Optional[RPUConfigAlias] = None,
realistic_read_write: bool = False,
weight_scaling_omega: float = 0.0,
):
# Call super() after tile creation, including ``reset_parameters``.
Linear.__init__(self, in_features, out_features, bias=bias)
# Create tiles
if rpu_config is None:
rpu_config = SingleRPUConfig()
AnalogModuleBase.__init__(
self,
in_features,
out_features,
bias,
realistic_read_write,
weight_scaling_omega,
rpu_config.mapping
)
if self.analog_bias:
raise ModuleError("AnalogLinearMapped only supports digital bias.")
# More than one tile may need to be created. If so, divide
# weight matrix into equal pieces along input dimension with
# as many tiles as needed
max_input_size = rpu_config.mapping.max_input_size
max_output_size = rpu_config.mapping.max_output_size
self.in_sizes = self.get_split_sizes(in_features, max_input_size)
self.out_sizes = self.get_split_sizes(out_features, max_output_size)
self.analog_tile_array = []
for i, in_tile_size in enumerate(self.in_sizes):
in_tiles = []
for j, out_tile_size in enumerate(self.out_sizes):
tile = rpu_config.tile_class(out_tile_size,
in_tile_size,
rpu_config,
bias=self.analog_bias)
self.register_analog_tile(tile, name=f"{i}_{j}")
in_tiles.append(tile)
self.analog_tile_array.append(in_tiles)
# Set weights from the reset_parameters
self.set_weights(self.weight, self.bias)
# Unregister weight/bias as a parameter but keep for sync
self.unregister_parameter('weight')
if self.analog_bias:
self.unregister_parameter('bias')
def get_split_sizes(self, size: int, split_max_size: int) -> List[int]:
""" Computed the split sizes.
Args:
size: number of elements of the layer in one dimension
split_max_size: max size of the split
Returns:
List of split sizes
"""
if split_max_size <= 0:
return [size]
n_splits = (size + split_max_size - 1) // split_max_size
base, extra = divmod(size, n_splits)
return [base + (i < extra) for i in range(n_splits)]
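# Worked examples of the split computation above (numbers are illustrative):
#   get_split_sizes(300, 128) -> n_splits = 3, divmod(300, 3) = (100, 0) -> [100, 100, 100]
#   get_split_sizes(10, 3)    -> n_splits = 4, divmod(10, 4)  = (2, 2)   -> [3, 3, 2, 2]
#   get_split_sizes(50, 0)    -> [50]   (a non-positive max size disables splitting)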
def set_weights(
self,
weight: Tensor,
bias: Optional[Tensor] = None,
force_exact: bool = False
) -> None:
"""Set the weight (and bias) with given Tensors.
This uses a realistic write if the property ``realistic_read_write``
of the layer is set, unless it is overwritten by ``force_exact``. It
uses a scaled write if ``weight_scaling_omega`` is positive (see
:meth:`~aihwkit.simulator.tiles.base.BaseTile.set_weights_scaled`).
Note:
This is the recommended way for setting the weight/bias matrix of
the analog tile, as it will correctly store the weights into the
internal memory. Directly writing to ``self.weight`` and
``self.bias`` might yield wrong results as they are not always in
sync with the analog tile Parameters, for performance reasons.
Args:
weight: weight matrix
bias: bias vector
force_exact: forces an exact write to the analog tiles
"""
shape = [self.out_features, self.in_features]
weight = weight.clone().reshape(shape)
realistic = self.realistic_read_write and not force_exact
in_start = in_end = 0
for in_size, in_tiles in zip(self.in_sizes, self.analog_tile_array):
in_end += in_size
out_start = out_end = 0
for out_size, analog_tile in zip(self.out_sizes, in_tiles):
out_end += out_size
tile_weight = weight[out_start:out_end, in_start:in_end]
if self.weight_scaling_omega > 0.0:
analog_tile.set_weights_scaled(tile_weight, None,
realistic=realistic,
omega=self.weight_scaling_omega)
else:
analog_tile.set_weights(tile_weight, None, realistic=realistic)
out_start = out_end
in_start = in_end
if self.digital_bias and bias is not None:
with no_grad():
self.bias.data[:] = bias[:]
self._sync_weights_from_tile()
def get_weights(self, force_exact: bool = False) -> Tuple[Tensor, Optional[Tensor]]:
"""Get the weight (and bias) tensors.
This uses a realistic read if the property ``realistic_read_write`` of
the layer is set, unless it is overwritten by ``force_exact``. It
scales the analog weights by the digital alpha scale if
``weight_scaling_omega`` is positive (see
:meth:`~aihwkit.simulator.tiles.base.BaseTile.get_weights_scaled`).
Note:
This is the recommended way for reading the weight/bias matrix from
the analog tile, as it will correctly fetch the weights from the
internal memory. Accessing ``self.weight`` and ``self.bias`` might
yield wrong results as they are not always in sync with the
analog tile library, for performance reasons.
Args:
force_exact: forces an exact read to the analog tiles
Returns:
tuple: weight matrix, bias vector
"""
realistic = self.realistic_read_write and not force_exact
weight_lst = []
for in_tiles in self.analog_tile_array:
in_tile_weight = []
for analog_tile in in_tiles:
if self.weight_scaling_omega > 0.0:
tile_weight, _ = analog_tile.get_weights_scaled(realistic=realistic)
else:
tile_weight, _ = analog_tile.get_weights(realistic=realistic)
in_tile_weight.append(tile_weight)
weight_lst.append(cat(in_tile_weight, 0))
weight = cat(weight_lst, 1)
if self.digital_bias:
with no_grad():
return weight, self.bias.data.detach().cpu()
return weight, None
def reset_parameters(self) -> None:
"""Reset the parameters (weight and bias)."""
super().reset_parameters()
if self.analog_tile_count():
self.set_weights(self.weight, self.bias)
def forward(self, x_input: Tensor) -> Tensor:
"""Compute the forward pass."""
# pylint: disable=arguments-differ,arguments-renamed
if self.analog_tile_count() == 1:
out = AnalogFunction.apply(
self.analog_tile_array[0][0].get_analog_ctx(), x_input,
self.analog_tile_array[0][0].shared_weights, not self.training)
if self.digital_bias:
return out + self.bias
return out
# mapped version
last_dim = x_input.ndim - 1
splits = split(x_input, self.in_sizes, dim=last_dim)
result = None # type: Tensor
for idx, (x, in_tiles) in enumerate(zip(splits, self.analog_tile_array)):
out_result = []
for analog_tile in in_tiles:
output = AnalogFunction.apply(
analog_tile.get_analog_ctx(), x,
analog_tile.shared_weights, not self.training)
out_result.append(output)
if idx == 0:
result = cat(out_result, last_dim)
else:
result.add_(cat(out_result, last_dim))
# add bias to final result
if self.digital_bias:
return result.add_(self.bias)
return result
def extra_repr(self) -> str:
"""Set the extra representation of the module.
Returns:
A string with the extra representation.
"""
output = AnalogModuleBase.extra_repr(self)
output += ', mapping={}'.format((len(self.in_sizes), len(self.out_sizes)))
return output
@classmethod
def from_digital(
cls,
module: Linear,
rpu_config: Optional[RPUConfigAlias] = None,
realistic_read_write: bool = False,
weight_scaling_omega: float = 0.0,
) -> 'AnalogLinearMapped':
"""Return an AnalogLinearMapped layer from a torch Linear layer.
Args:
module: The torch Linear layer to convert.
rpu_config: RPU config to apply to all converted tiles.
realistic_read_write: Whether to use closed-loop programming
when setting the weights. Applied to all converted tiles.
weight_scaling_omega: If non-zero, the analog weights will be
scaled by ``weight_scaling_omega`` divided by the absolute
maximum value of the original weight matrix.
Note:
Make sure that the weight max and min settings of the
device support the desired analog weight range.
Returns:
an AnalogLinearMapped layer based on the digital Linear ``module``.
"""
analog_module = cls(module.in_features,
module.out_features,
module.bias is not None,
rpu_config,
realistic_read_write,
weight_scaling_omega,
)
analog_module.set_weights(module.weight, module.bias)
return analog_module
| [
"torch.cat",
"torch.nn.Linear.__init__",
"torch.no_grad",
"torch.split"
] | 1.7 | todd-deshane/aihwkit | 07269e29731f9a6482d25326400437f6bef2fc94 |
1.7 | # -*- coding: utf-8 -*-
# (C) Copyright 2020, 2021 IBM. All Rights Reserved.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Autograd functions for aihwkit."""
from typing import Any, Optional, Tuple
from torch import Tensor, empty_like
from torch.autograd import Function
from aihwkit.optim.context import AnalogContext
class AnalogFunctionBase(Function):
"""Base function for analog functions."""
# pylint: disable=arguments-differ, protected-access, abstract-method
@staticmethod
def forward(
ctx: Any,
analog_ctx: AnalogContext,
input_: Tensor,
shared_weights: Optional[Tensor] = None,
is_test: bool = False) -> Tensor:
"""Execute the forward pass in the analog tile.
Note: Indexed versions can used when analog_ctx.use_indexed is
set to True.
"""
# Store in context for using during `backward()`.
analog_tile = analog_ctx.analog_tile
ctx.analog_ctx = analog_ctx
ctx.shared_weights = None
ctx.save_for_backward(input_)
use_indexed = analog_ctx.use_indexed
if shared_weights is not None:
ctx.shared_weights = shared_weights
analog_tile.ensure_shared_weights(shared_weights)
analog_ctx.use_torch_update = True
else:
analog_ctx.use_torch_update = False
# Invoke the forward pass in the tile instance.
if use_indexed:
return analog_tile.forward_indexed(input_, is_test)
return analog_tile.forward(input_, is_test)
@staticmethod
def backward(
ctx: Any,
grad_output: Tensor,
) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor], Optional[Tensor]]:
"""Execute the backward pass in the analog tile."""
analog_ctx = ctx.analog_ctx
analog_tile = analog_ctx.analog_tile
input_, = ctx.saved_tensors
shared_weights_grad = None
use_indexed = analog_ctx.use_indexed
if ctx.shared_weights is not None:
analog_tile.ensure_shared_weights(ctx.shared_weights)
# Call the backward function in the tile instance.
if use_indexed:
grad_input = analog_tile.backward_indexed(grad_output)
else:
grad_input = analog_tile.backward(grad_output)
if analog_ctx.use_torch_update:
# Grad computed directly (for inference training)
shared_weights_grad = empty_like(ctx.shared_weights)
analog_tile.set_delta_weights(shared_weights_grad)
if use_indexed:
analog_tile.update_indexed(input_, grad_output)
else:
analog_tile.update(input_, grad_output)
analog_tile.reset_delta_weights()
else:
# Store activation and errors for optimizer (for analog training)
analog_ctx.analog_input.append(input_)
analog_ctx.analog_grad_output.append(grad_output)
return None, grad_input, shared_weights_grad, None
class AnalogFunction(AnalogFunctionBase):
"""Function that delegates into a `RPU` unit."""
# pylint: disable=arguments-differ, abstract-method
@staticmethod
def forward(
ctx: Any,
analog_ctx: AnalogContext,
input_: Tensor,
shared_weights: Optional[Tensor] = None,
is_test: bool = False) -> Tensor:
"""Execute the forward pass in the analog tile."""
analog_ctx.use_indexed = False
return AnalogFunctionBase.forward(
ctx, analog_ctx, input_, shared_weights, is_test)
class AnalogIndexedFunction(AnalogFunctionBase):
"""Function that delegates into a `RPU` unit to use the indexed forward/backward/update."""
# pylint: disable=arguments-differ, abstract-method
@staticmethod
def forward(
ctx: Any,
analog_ctx: AnalogContext,
input_: Tensor,
shared_weights: Optional[Tensor] = None,
is_test: bool = False) -> Tensor:
"""Execute the forward pass in the analog tile."""
analog_ctx.use_indexed = True
return AnalogFunctionBase.forward(
ctx, analog_ctx, input_, shared_weights, is_test)
| [
"torch.empty_like"
] | 1.7 | todd-deshane/aihwkit | 07269e29731f9a6482d25326400437f6bef2fc94 |
1.9 | import copy
from functools import partial
from collections import OrderedDict
import torch
from torch import nn
from efficientnetv2 import get_efficientnet_v2_structure
from efficientnetv2 import load_from_zoo
class ConvBNAct(nn.Sequential):
"""Convolution-Normalization-Activation Module"""
def __init__(self, in_channel, out_channel, kernel_size, stride, groups, norm_layer, act, conv_layer=nn.Conv2d):
super(ConvBNAct, self).__init__(
conv_layer(in_channel, out_channel, kernel_size, stride=stride, padding=(kernel_size-1)//2, groups=groups, bias=False),
norm_layer(out_channel),
act()
)
class SEUnit(nn.Module):
"""Squeeze-Excitation Unit
paper: https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper
"""
def __init__(self, in_channel, reduction_ratio=4, act1=partial(nn.SiLU, inplace=True), act2=nn.Sigmoid):
super(SEUnit, self).__init__()
hidden_dim = in_channel // reduction_ratio
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc1 = nn.Conv2d(in_channel, hidden_dim, (1, 1), bias=True)
self.fc2 = nn.Conv2d(hidden_dim, in_channel, (1, 1), bias=True)
self.act1 = act1()
self.act2 = act2()
def forward(self, x):
return x * self.act2(self.fc2(self.act1(self.fc1(self.avg_pool(x)))))
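# Shape flow of the squeeze-excitation above (B = batch, C = channels; illustrative):
#   x: (B, C, H, W) -> avg_pool: (B, C, 1, 1) -> fc1: (B, C // 4, 1, 1)
#   -> fc2: (B, C, 1, 1) -> sigmoid gate, broadcast-multiplied back onto x.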
class StochasticDepth(nn.Module):
"""StochasticDepth
paper: https://link.springer.com/chapter/10.1007/978-3-319-46493-0_39
:arg
- prob: Probability of dropping the residual path ("dying")
- mode: "row" or "all". "row" means that each row survives with different probability
"""
def __init__(self, prob, mode):
super(StochasticDepth, self).__init__()
self.prob = prob
self.survival = 1.0 - prob
self.mode = mode
def forward(self, x):
if self.prob == 0.0 or not self.training:
return x
else:
shape = [x.size(0)] + [1] * (x.ndim - 1) if self.mode == 'row' else [1]
return x * torch.empty(shape).bernoulli_(self.survival).div_(self.survival).to(x.device)
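# Behaviour sketch (illustrative numbers): with prob=0.2 and mode="row", each sample
# in the batch is zeroed with probability 0.2 during training and survivors are
# rescaled by 1 / 0.8, so the output matches the input in expectation. At evaluation
# time (or with prob=0.0) the module is an identity.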
class MBConvConfig:
"""EfficientNet Building block configuration"""
def __init__(self, expand_ratio: float, kernel: int, stride: int, in_ch: int, out_ch: int, layers: int,
use_se: bool, fused: bool, act=nn.SiLU, norm_layer=nn.BatchNorm2d):
self.expand_ratio = expand_ratio
self.kernel = kernel
self.stride = stride
self.in_ch = in_ch
self.out_ch = out_ch
self.num_layers = layers
self.act = act
self.norm_layer = norm_layer
self.use_se = use_se
self.fused = fused
@staticmethod
def adjust_channels(channel, factor, divisible=8):
new_channel = channel * factor
divisible_channel = max(divisible, (int(new_channel + divisible / 2) // divisible) * divisible)
divisible_channel += divisible if divisible_channel < 0.9 * new_channel else 0
return divisible_channel
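# Worked examples of the channel rounding above (illustrative values):
#   adjust_channels(24, 4)   -> 96   (24 * 4 is already a multiple of 8)
#   adjust_channels(16, 1.5) -> 24
#   adjust_channels(12, 1.2) -> 16   (14.4 rounds to the nearest multiple of 8)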
class MBConv(nn.Module):
"""EfficientNet main building blocks
:arg
- c: MBConvConfig instance
- sd_prob: stochastic path probability
"""
def __init__(self, c, sd_prob=0.0):
super(MBConv, self).__init__()
inter_channel = c.adjust_channels(c.in_ch, c.expand_ratio)
block = []
if c.expand_ratio == 1:
block.append(('fused', ConvBNAct(c.in_ch, inter_channel, c.kernel, c.stride, 1, c.norm_layer, c.act)))
elif c.fused:
block.append(('fused', ConvBNAct(c.in_ch, inter_channel, c.kernel, c.stride, 1, c.norm_layer, c.act)))
block.append(('fused_point_wise', ConvBNAct(inter_channel, c.out_ch, 1, 1, 1, c.norm_layer, nn.Identity)))
else:
block.append(('linear_bottleneck', ConvBNAct(c.in_ch, inter_channel, 1, 1, 1, c.norm_layer, c.act)))
block.append(('depth_wise', ConvBNAct(inter_channel, inter_channel, c.kernel, c.stride, inter_channel, c.norm_layer, c.act)))
block.append(('se', SEUnit(inter_channel, 4 * c.expand_ratio)))
block.append(('point_wise', ConvBNAct(inter_channel, c.out_ch, 1, 1, 1, c.norm_layer, nn.Identity)))
self.block = nn.Sequential(OrderedDict(block))
self.use_skip_connection = c.stride == 1 and c.in_ch == c.out_ch
self.stochastic_path = StochasticDepth(sd_prob, "row")
def forward(self, x):
out = self.block(x)
if self.use_skip_connection:
out = x + self.stochastic_path(out)
return out
class EfficientNetV2(nn.Module):
"""Pytorch Implementation of EfficientNetV2
paper: https://arxiv.org/abs/2104.00298
- reference 1 (pytorch): https://github.com/d-li14/efficientnetv2.pytorch/blob/main/effnetv2.py
- reference 2 (official): https://github.com/google/automl/blob/master/efficientnetv2/effnetv2_configs.py
:arg
- layer_infos: list of MBConvConfig
- out_channels: bottleneck channel
- nclass: number of classes
- dropout: dropout probability before classifier layer
- stochastic_depth: stochastic depth probability
"""
def __init__(self, layer_infos, out_channels=1280, nclass=0, dropout=0.2, stochastic_depth=0.0,
block=MBConv, act_layer=nn.SiLU, norm_layer=nn.BatchNorm2d):
super(EfficientNetV2, self).__init__()
self.layer_infos = layer_infos
self.norm_layer = norm_layer
self.act = act_layer
self.in_channel = layer_infos[0].in_ch
self.final_stage_channel = layer_infos[-1].out_ch
self.out_channels = out_channels
self.cur_block = 0
self.num_block = sum(stage.num_layers for stage in layer_infos)
self.stochastic_depth = stochastic_depth
self.stem = ConvBNAct(3, self.in_channel, 3, 2, 1, self.norm_layer, self.act)
self.blocks = nn.Sequential(*self.make_stages(layer_infos, block))
self.head = nn.Sequential(OrderedDict([
('bottleneck', ConvBNAct(self.final_stage_channel, out_channels, 1, 1, 1, self.norm_layer, self.act)),
('avgpool', nn.AdaptiveAvgPool2d((1, 1))),
('flatten', nn.Flatten()),
('dropout', nn.Dropout(p=dropout, inplace=True)),
('classifier', nn.Linear(out_channels, nclass) if nclass else nn.Identity())
]))
def make_stages(self, layer_infos, block):
return [layer for layer_info in layer_infos for layer in self.make_layers(copy.copy(layer_info), block)]
def make_layers(self, layer_info, block):
layers = []
for i in range(layer_info.num_layers):
layers.append(block(layer_info, sd_prob=self.get_sd_prob()))
layer_info.in_ch = layer_info.out_ch
layer_info.stride = 1
return layers
def get_sd_prob(self):
sd_prob = self.stochastic_depth * (self.cur_block / self.num_block)
self.cur_block += 1
return sd_prob
def forward(self, x):
return self.head(self.blocks(self.stem(x)))
def change_dropout_rate(self, p):
self.head[-2] = nn.Dropout(p=p, inplace=True)
def efficientnet_v2_init(model):
for m in model.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0.0, std=0.01)
nn.init.zeros_(m.bias)
def get_efficientnet_v2(model_name, pretrained, nclass=0, dropout=0.1, stochastic_depth=0.2, **kwargs):
residual_config = [MBConvConfig(*layer_config) for layer_config in get_efficientnet_v2_structure(model_name)]
model = EfficientNetV2(residual_config, 1280, nclass, dropout=dropout, stochastic_depth=stochastic_depth, block=MBConv, act_layer=nn.SiLU)
efficientnet_v2_init(model)
if pretrained:
load_from_zoo(model, model_name)
return model | [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Identity",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.ones_",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.zeros_",
"torch.empty",
"torch.nn.Flatten"
] | 1.9.0 | hankyul2/EfficientNetV2-pytorch | bce59dae3ce69e3e7e8aa99e4f32214b015dd1f8 |
1.1 | #!/usr/bin/env python3
import argparse
import random
import torch
from torch import nn, optim
from torch.nn import functional as F
from tqdm import tqdm
import learn2learn as l2l
class Net(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, num_classes, input_dim=768, inner_dim=200, pooler_dropout=0.3):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = nn.ReLU()
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, x, **kwargs):
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = F.log_softmax(self.out_proj(x), dim=1)
return x
def accuracy(predictions, targets):
predictions = predictions.argmax(dim=1)
acc = (predictions == targets).sum().float()
acc /= len(targets)
return acc.item()
def collate_tokens(values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False):
"""Convert a list of 1d tensors into a padded 2d tensor."""
size = max(v.size(0) for v in values)
res = values[0].new(len(values), size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
assert src[-1] == eos_idx
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])
return res
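# A small usage sketch (hypothetical token values): padding two encoded sequences of
# lengths 3 and 5 with pad_idx=1 yields a (2, 5) tensor; with left_pad=True the
# padding would be placed in front of the shorter sequence instead:
#   collate_tokens([torch.tensor([7, 8, 2]), torch.tensor([5, 6, 7, 8, 2])], pad_idx=1)
#   -> tensor([[7, 8, 2, 1, 1],
#              [5, 6, 7, 8, 2]])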
class _BatchedDataset(torch.utils.data.Dataset):
def __init__(self, batched):
self.sents = [s for s in batched[0]]
self.ys = [y for y in batched[1]]
def __len__(self):
return len(self.ys)
def __getitem__(self, idx):
return (self.sents[idx], self.ys[idx])
def compute_loss(task, roberta, device, learner, loss_func, batch=15):
loss = 0.0
acc = 0.0
for i, (x, y) in enumerate(torch.utils.data.DataLoader(
_BatchedDataset(task), batch_size=batch, shuffle=True, num_workers=0)):
# RoBERTa ENCODING
x = collate_tokens([roberta.encode(sent) for sent in x], pad_idx=1)
with torch.no_grad():
x = roberta.extract_features(x)
x = x[:, 0, :]
# Moving to device
x, y = x.to(device), y.view(-1).to(device)
output = learner(x)
curr_loss = loss_func(output, y)
acc += accuracy(output, y)
loss += curr_loss / len(task)
loss /= len(task)
return loss, acc
def main(lr=0.005, maml_lr=0.01, iterations=1000, ways=5, shots=1, tps=32, fas=5, device=torch.device("cpu"),
download_location="/tmp/text"):
dataset = l2l.text.datasets.NewsClassification(root=download_location, download=True)
dataset = l2l.data.MetaDataset(dataset)
classes = list(range(len(dataset.labels))) # 41 classes
random.shuffle(classes)
train_dataset, validation_dataset, test_dataset = dataset, dataset, dataset
train_gen = l2l.data.TaskDataset(
train_dataset, num_tasks=20000,
task_transforms=[
l2l.data.transforms.FusedNWaysKShots(
train_dataset, n=ways, k=shots, filter_labels=classes[:20]),
l2l.data.transforms.LoadData(train_dataset),
l2l.data.transforms.RemapLabels(train_dataset)],)
validation_gen = l2l.data.TaskDataset(
validation_dataset, num_tasks=20000,
task_transforms=[
l2l.data.transforms.FusedNWaysKShots(
validation_dataset, n=ways, k=shots, filter_labels=classes[20:30]),
l2l.data.transforms.LoadData(validation_dataset),
l2l.data.transforms.RemapLabels(validation_dataset)],)
test_gen = l2l.data.TaskDataset(
test_dataset, num_tasks=20000,
task_transforms=[
l2l.data.transforms.FusedNWaysKShots(
test_dataset, n=ways, k=shots, filter_labels=classes[30:]),
l2l.data.transforms.LoadData(test_dataset),
l2l.data.transforms.RemapLabels(test_dataset)],)
torch.hub.set_dir(download_location)
roberta = torch.hub.load('pytorch/fairseq', 'roberta.base')
roberta.eval()
roberta.to(device)
model = Net(num_classes=ways)
model.to(device)
meta_model = l2l.algorithms.MAML(model, lr=maml_lr)
opt = optim.Adam(meta_model.parameters(), lr=lr)
loss_func = nn.NLLLoss(reduction="sum")
tqdm_bar = tqdm(range(iterations))
accs = []
for _ in tqdm_bar:
iteration_error = 0.0
iteration_acc = 0.0
for _ in range(tps):
learner = meta_model.clone()
train_task, valid_task = train_gen.sample(), validation_gen.sample()
# Fast Adaptation
for _ in range(fas):
train_error, _ = compute_loss(train_task, roberta, device, learner, loss_func, batch=shots * ways)
learner.adapt(train_error)
# Compute validation loss
valid_error, valid_acc = compute_loss(valid_task, roberta, device, learner, loss_func,
batch=shots * ways)
iteration_error += valid_error
iteration_acc += valid_acc
iteration_error /= tps
iteration_acc /= tps
tqdm_bar.set_description("Loss : {:.3f} Acc : {:.3f}".format(iteration_error.item(), iteration_acc))
accs.append(iteration_acc)
# Take the meta-learning step
opt.zero_grad()
iteration_error.backward()
opt.step()
print (f'first and best validation accuracy: {accs[0]:.4f}, {max(accs):.4f}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Learn2Learn Text Classification Example')
parser.add_argument('--ways', type=int, default=5, metavar='N',
help='number of ways (default: 5)')
parser.add_argument('--shots', type=int, default=1, metavar='N',
help='number of shots (default: 1)')
parser.add_argument('-tps', '--tasks-per-step', type=int, default=32, metavar='N',
help='tasks per step (default: 32)')
parser.add_argument('-fas', '--fast-adaption-steps', type=int, default=5, metavar='N',
help='steps per fast adaption (default: 5)')
parser.add_argument('--iterations', type=int, default=1000, metavar='N',
help='number of iterations (default: 1000)')
parser.add_argument('--lr', type=float, default=0.005, metavar='LR',
help='learning rate (default: 0.005)')
parser.add_argument('--maml-lr', type=float, default=0.01, metavar='LR',
help='learning rate for MAML (default: 0.01)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--download-location', type=str, default="/tmp/text", metavar='S',
help='download location for train data and roberta (default: /tmp/text)')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
random.seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
main(lr=args.lr, maml_lr=args.maml_lr, iterations=args.iterations, ways=args.ways, shots=args.shots,
tps=args.tasks_per_step, fas=args.fast_adaption_steps, device=device,
download_location=args.download_location)
| [
"torch.nn.NLLLoss",
"torch.device",
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.no_grad",
"torch.hub.set_dir",
"torch.manual_seed",
"torch.nn.ReLU",
"torch.cuda.is_available",
"torch.hub.load"
] | 1.1.0 | heiseApple/learn2learn | df3c3291b4681440a80a69a7815090a4bd3cd661 |
1.3 | from typing import Tuple, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.filters.kernels import normalize_kernel2d
def compute_padding(kernel_size: Tuple[int, int]) -> List[int]:
"""Computes padding tuple."""
# 4 ints: (padding_left, padding_right,padding_top,padding_bottom)
# https://pytorch.org/docs/stable/nn.html#torch.nn.functional.pad
assert len(kernel_size) == 2, kernel_size
computed = [(k - 1) // 2 for k in kernel_size]
return [computed[1], computed[1], computed[0], computed[0]]
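# Worked example (illustrative): a 3 x 5 kernel needs 1 pixel of padding vertically
# and 2 horizontally, returned in F.pad order (left, right, top, bottom):
#   compute_padding((3, 5)) -> [2, 2, 1, 1]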
def filter2D(input: torch.Tensor, kernel: torch.Tensor,
border_type: str = 'reflect',
normalized: bool = False) -> torch.Tensor:
r"""Function that convolves a tensor with a kernel.
The function applies a given kernel to a tensor. The kernel is applied
independently at each depth channel of the tensor. Before applying the
kernel, the function applies padding according to the specified mode so
that the output remains in the same shape.
Args:
input (torch.Tensor): the input tensor with shape of
:math:`(B, C, H, W)`.
kernel (torch.Tensor): the kernel to be convolved with the input
tensor. The kernel shape must be :math:`(B, kH, kW)`.
border_type (str): the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``. Default: ``'reflect'``.
normalized (bool): If True, kernel will be L1 normalized.
Return:
torch.Tensor: the convolved tensor of same size and numbers of channels
as the input.
"""
if not isinstance(input, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}"
.format(type(input)))
if not isinstance(kernel, torch.Tensor):
raise TypeError("Input kernel type is not a torch.Tensor. Got {}"
.format(type(kernel)))
if not isinstance(border_type, str):
raise TypeError("Input border_type is not string. Got {}"
.format(type(kernel)))
if not len(input.shape) == 4:
raise ValueError("Invalid input shape, we expect BxCxHxW. Got: {}"
.format(input.shape))
if not len(kernel.shape) == 3:
raise ValueError("Invalid kernel shape, we expect BxHxW. Got: {}"
.format(kernel.shape))
borders_list: List[str] = ['constant', 'reflect', 'replicate', 'circular']
if border_type not in borders_list:
raise ValueError("Invalid border_type, we expect the following: {0}."
"Got: {1}".format(borders_list, border_type))
# prepare kernel
b, c, h, w = input.shape
tmp_kernel: torch.Tensor = kernel.to(input.device).to(input.dtype)
tmp_kernel = tmp_kernel.repeat(c, 1, 1, 1)
if normalized:
tmp_kernel = normalize_kernel2d(tmp_kernel)
# pad the input tensor
height, width = tmp_kernel.shape[-2:]
padding_shape: List[int] = compute_padding((height, width))
input_pad: torch.Tensor = F.pad(input, padding_shape, mode=border_type)
# convolve the tensor with the kernel
return F.conv2d(input_pad, tmp_kernel, padding=0, stride=1, groups=c)
| [
"torch.nn.functional.pad",
"torch.nn.functional.conv2d"
] | 1.3.0 | tdchaitanya/kornia | 6dd16563f66f979c7a95846ef86678894b7d54fd |
1.5 | import time
import gym
import numpy as np
import torch
import torch.nn.functional as F
from fireup.algos.ddpg import core
from fireup.utils.logx import EpochLogger
class ReplayBuffer:
"""
A simple FIFO experience replay buffer for DDPG agents.
"""
def __init__(self, obs_dim, act_dim, size):
self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
self.rews_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(self, obs, act, rew, next_obs, done):
self.obs1_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.acts_buf[self.ptr] = act
self.rews_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
return dict(
obs1=self.obs1_buf[idxs],
obs2=self.obs2_buf[idxs],
acts=self.acts_buf[idxs],
rews=self.rews_buf[idxs],
done=self.done_buf[idxs],
)
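# A minimal usage sketch of the buffer (hypothetical sizes, for illustration):
#   buf = ReplayBuffer(obs_dim=3, act_dim=1, size=1000)
#   buf.store(obs, act, rew, next_obs, done)   # called once per environment step
#   batch = buf.sample_batch(batch_size=32)    # dict of numpy arrays, e.g.
#   batch["obs1"].shape == (32, 3), batch["rews"].shape == (32,)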
"""
Deep Deterministic Policy Gradient (DDPG)
"""
def ddpg(
env_fn,
actor_critic=core.ActorCritic,
ac_kwargs=dict(),
seed=0,
steps_per_epoch=5000,
epochs=100,
replay_size=int(1e6),
gamma=0.99,
polyak=0.995,
pi_lr=1e-3,
q_lr=1e-3,
batch_size=100,
start_steps=10000,
act_noise=0.1,
max_ep_len=1000,
logger_kwargs=dict(),
save_freq=1,
):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: The agent's main model which takes some states ``x``
and actions ``a`` and returns a tuple of:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` (batch, act_dim) | Deterministically computes actions
| from policy given states.
``q`` (batch,) | Gives the current estimate of Q* for
| states ``x`` and actions in
| ``a``.
``q_pi`` (batch,) | Gives the composition of ``q`` and
| ``pi`` for states in ``x``:
| q(x, pi(x)).
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
class you provided to DDPG.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
pi_lr (float): Learning rate for policy.
q_lr (float): Learning rate for Q-networks.
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
act_noise (float): Stddev for Gaussian exploration noise added to
policy at training time. (At test time, no noise is added.)
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
torch.manual_seed(seed)
np.random.seed(seed)
env, test_env = env_fn(), env_fn()
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Share information about action space with policy architecture
ac_kwargs["action_space"] = env.action_space
# Main outputs from computation graph
main = actor_critic(in_features=obs_dim, **ac_kwargs)
# Target networks
target = actor_critic(in_features=obs_dim, **ac_kwargs)
target.eval()
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Count variables
var_counts = tuple(
core.count_vars(module) for module in [main.policy, main.q, main]
)
print("\nNumber of parameters: \t pi: %d, \t q: %d, \t total: %d\n" % var_counts)
# Separate train ops for pi, q
pi_optimizer = torch.optim.Adam(main.policy.parameters(), lr=pi_lr)
q_optimizer = torch.optim.Adam(main.q.parameters(), lr=q_lr)
# Initializing targets to match main variables
target.load_state_dict(main.state_dict())
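    # Exploration rule used below: a = clip(pi(s) + eps, -act_limit, act_limit) with
    # Gaussian noise eps ~ N(0, noise_scale^2 * I); noise_scale=0 recovers the
    # deterministic policy used by test_agent.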
def get_action(o, noise_scale):
pi = main.policy(torch.Tensor(o.reshape(1, -1)))
a = pi.detach().numpy()[0] + noise_scale * np.random.randn(act_dim)
return np.clip(a, -act_limit, act_limit)
def test_agent(n=10):
for _ in range(n):
o, r, d, ep_ret, ep_len = test_env.reset(), 0, False, 0, 0
while not (d or (ep_len == max_ep_len)):
# Take deterministic actions at test time (noise_scale=0)
o, r, d, _ = test_env.step(get_action(o, 0))
ep_ret += r
ep_len += 1
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
total_steps = steps_per_epoch * epochs
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
main.eval()
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy (with some noise, via act_noise).
"""
if t > start_steps:
a = get_action(o, act_noise)
else:
a = env.action_space.sample()
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len == max_ep_len else d
# Store experience to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
if d or (ep_len == max_ep_len):
main.train()
"""
Perform all DDPG updates at the end of the trajectory,
in accordance with tuning done by TD3 paper authors.
"""
for _ in range(ep_len):
batch = replay_buffer.sample_batch(batch_size)
(obs1, obs2, acts, rews, done) = (
torch.Tensor(batch["obs1"]),
torch.Tensor(batch["obs2"]),
torch.Tensor(batch["acts"]),
torch.Tensor(batch["rews"]),
torch.Tensor(batch["done"]),
)
_, _, q_pi_targ = target(obs2, acts)
# Bellman backup for Q function
backup = (rews + gamma * (1 - done) * q_pi_targ).detach()
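                # Detached TD target y = r + gamma * (1 - d) * Q_targ(s', pi_targ(s')),
                # so no gradients flow into the target networks.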
# DDPG Q loss
_, q, _ = main(obs1, acts)
q_loss = F.mse_loss(q, backup)
# Q-learning update
q_optimizer.zero_grad()
q_loss.backward()
q_optimizer.step()
logger.store(LossQ=q_loss.item(), QVals=q.data.numpy())
# DDPG Policy loss
_, _, q_pi = main(obs1, acts)
pi_loss = -q_pi.mean()
# Policy update
pi_optimizer.zero_grad()
pi_loss.backward()
pi_optimizer.step()
logger.store(LossPi=pi_loss.item())
# Polyak averaging for target parameters
for p_main, p_target in zip(main.parameters(), target.parameters()):
p_target.data.copy_(
polyak * p_target.data + (1 - polyak) * p_main.data
)
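                # Soft target update: theta_targ <- polyak * theta_targ + (1 - polyak) * theta.
                # With polyak=0.995 each update moves the targets 0.5% toward the main
                # networks, i.e. a time constant of roughly 1 / (1 - polyak) = 200 updates.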
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# End of epoch wrap-up
if t > 0 and t % steps_per_epoch == 0:
epoch = t // steps_per_epoch
# Save model
if (epoch % save_freq == 0) or (epoch == epochs - 1):
logger.save_state({"env": env}, main, None)
# Test the performance of the deterministic version of the agent.
test_agent()
# Log info about epoch
logger.log_tabular("Epoch", epoch)
logger.log_tabular("EpRet", with_min_and_max=True)
logger.log_tabular("TestEpRet", with_min_and_max=True)
logger.log_tabular("EpLen", average_only=True)
logger.log_tabular("TestEpLen", average_only=True)
logger.log_tabular("TotalEnvInteracts", t)
logger.log_tabular("QVals", with_min_and_max=True)
logger.log_tabular("LossPi", average_only=True)
logger.log_tabular("LossQ", average_only=True)
logger.log_tabular("Time", time.time() - start_time)
logger.dump_tabular()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--env", type=str, default="HalfCheetah-v2")
parser.add_argument("--hid", type=int, default=300)
parser.add_argument("--l", type=int, default=1)
parser.add_argument("--gamma", type=float, default=0.99)
parser.add_argument("--seed", "-s", type=int, default=0)
parser.add_argument("--epochs", type=int, default=50)
parser.add_argument("--exp_name", type=str, default="ddpg")
args = parser.parse_args()
from fireup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
ddpg(
lambda: gym.make(args.env),
actor_critic=core.ActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid] * args.l),
gamma=args.gamma,
seed=args.seed,
epochs=args.epochs,
logger_kwargs=logger_kwargs,
)
| [
"torch.manual_seed",
"torch.nn.functional.mse_loss",
"torch.Tensor"
] | 1.5.1 | kashif/spinningup-pytorch | 8f3389c239c94b3ff46453f359061ae30d851ce8 |
1.10 | """
Adapted from:
https://github.com/facebookresearch/barlowtwins/blob/main/main.py
"""
import torch
import torch.nn as nn
from transformers import Wav2Vec2Model
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
def off_diagonal(x):
"""
    Return a flattened view of the off-diagonal elements of a square matrix.
"""
n, m = x.shape
    # need to ensure it is a square matrix
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
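# Worked example: for a 3x3 matrix [[a, b, c], [d, e, f], [g, h, i]], flatten()[:-1]
# gives [a ... h], view(2, 4) regroups it as [[a, b, c, d], [e, f, g, h]], and [:, 1:]
# keeps [b, c, d, f, g, h], i.e. exactly the six off-diagonal entries.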
class BarlowTwins(nn.Module):
def __init__(self, output_size, lambd, batch_size, device):
super().__init__()
self.output_size = output_size
self.lambd = lambd
self.batch_size = batch_size
self.device = device
# linear layer as projector
# self.linear_layer = nn.Sequential(nn.Linear(1024, 64))
self.dropout = nn.Sequential(nn.Dropout(0.5))
self.wav2vec_model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base")
self.wav2vec_model.fc = nn.Identity()
# We will try to use projector in the original paper
# 3-layers projector
proj_layers = []
for layer in range(3):
if layer == 0: # first layer
proj_layers.append(nn.Linear(1024, self.output_size, bias=False))
else:
proj_layers.append(
nn.Linear(self.output_size, self.output_size, bias=False)
)
if layer < 2: # if not the last layer
proj_layers.append(nn.BatchNorm1d(self.output_size))
proj_layers.append(nn.ReLU(inplace=True))
self.projector = nn.Sequential(*proj_layers)
self.bn = nn.BatchNorm1d(self.output_size, affine=False)
def forward(self, input_1, input_2):
# compute masked indices
batch_size, raw_sequence_length = input_1.shape
sequence_length = self.wav2vec_model._get_feat_extract_output_lengths(
raw_sequence_length
)
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length), mask_prob=0.2, mask_length=2
)
mask_time_indices = torch.from_numpy(mask_time_indices).to(self.device)
# compute masked indices
n = input_1.shape[0]
# print("n: \n", n) # 32
output_1 = self.wav2vec_model(
input_1, mask_time_indices=mask_time_indices
).extract_features # [32, 2, 512]
output_1 = output_1.reshape(n, -1) # [32, 1024]
        # TODO: try dropout
output_1 = self.dropout(output_1)
# print("output_1: \n", output_1.shape) # 32
# TODO: (batch)normalization version of representation
# output_1 = self.linear_layer(output_1) # [32, 64]
output_1 = self.projector(output_1)
output_2 = self.wav2vec_model(
input_2, mask_time_indices=mask_time_indices
).extract_features
        # TODO: perhaps remove reshape
output_2 = output_2.reshape(n, -1)
# output_2 = self.linear_layer(output_2)
output_2 = self.projector(output_2)
        # TODO: try dropout
output_2 = self.dropout(output_2)
return output_1, output_2
def loss(self, output_1, output_2):
# empirical cross-correlation matrix
c = self.bn(output_1).T @ self.bn(output_2) # [32, 64]
# sum the cross-correlation matrix between all gpus
c.div_(self.batch_size) # 32 is batch size
# torch.distributed.all_reduce(c)
on_diag = torch.diagonal(c).add_(-1).pow(2).sum()
off_diag = off_diagonal(c).pow_(2).sum()
loss_val = on_diag + self.lambd * off_diag
return loss_val
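# Hypothetical usage sketch for BarlowTwins above (variable names are illustrative, not
# from this repo). The loss drives the cross-correlation matrix C of the two projected
# views toward the identity: L = sum_i (1 - C_ii)^2 + lambd * sum_{i != j} C_ij^2.
# >>> model = BarlowTwins(output_size=64, lambd=5e-3, batch_size=32, device="cpu")
# >>> z1, z2 = model(view_a, view_b)  # two augmented raw-audio batches of shape [32, n_samples],
# >>>                                 # short enough that wav2vec2 yields 2 frames (2 * 512 = 1024)
# >>> loss = model.loss(z1, z2)       # scalar to backpropagate through both branches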
class BarlowTwins_Contrastive(nn.Module):
def __init__(
self, output_size, lambd, triplet_margin, barlowtwins_lambd, batch_size, device
):
super().__init__()
self.output_size = output_size
self.lambd = lambd
self.barlowtwins_lambd = barlowtwins_lambd
self.batch_size = batch_size
self.device = device
self.cosine_similarity = nn.CosineSimilarity()
self.triplet_margin = triplet_margin
# linear layer as projector
# self.linear_layer = nn.Sequential(nn.Linear(1024, 64))
self.dropout = nn.Sequential(nn.Dropout(0.5))
self.wav2vec_model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base")
# self.wav2vec_model.fc = nn.Identity()
# 3-layers projector
proj_layers = []
for layer in range(3):
if layer == 0: # first layer
proj_layers.append(nn.Linear(1024, self.output_size, bias=False))
else:
proj_layers.append(
nn.Linear(self.output_size, self.output_size, bias=False)
)
if layer < 2: # if not the last layer
proj_layers.append(nn.BatchNorm1d(self.output_size))
proj_layers.append(nn.ReLU(inplace=True))
self.projector = nn.Sequential(*proj_layers)
self.bn = nn.BatchNorm1d(self.output_size, affine=False)
def forward(self, anchor, positive, negative):
# compute masked indices
n = anchor.shape[0]
batch_size, raw_sequence_length = anchor.shape
sequence_length = self.wav2vec_model._get_feat_extract_output_lengths(
raw_sequence_length
)
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length), mask_prob=0.2, mask_length=2
)
mask_time_indices = torch.from_numpy(mask_time_indices).to(self.device)
anchor_out = self.wav2vec_model(
anchor, mask_time_indices=mask_time_indices
).extract_features
anchor_out = self.dropout(anchor_out)
anchor_out = anchor_out.reshape(n, -1)
anchor_out = self.projector(anchor_out)
positive_out = self.wav2vec_model(
positive, mask_time_indices=mask_time_indices
).extract_features
positive_out = self.dropout(positive_out)
positive_out = positive_out.reshape(n, -1)
positive_out = self.projector(positive_out)
negative_out = self.wav2vec_model(
negative, mask_time_indices=mask_time_indices
).extract_features
negative_out = self.dropout(negative_out)
negative_out = negative_out.reshape(n, -1)
negative_out = self.projector(negative_out)
return anchor_out, positive_out, negative_out
def barlowtwins_loss(self, anchor_out, positive_out):
# empirical cross-correlation matrix
c = self.bn(anchor_out).T @ self.bn(positive_out) # [32, 64]
# sum the cross-correlation matrix between all gpus
        # TODO: use argparse for batch size 32
c.div_(self.batch_size) # 32 is batch size
# torch.distributed.all_reduce(c)
on_diag = torch.diagonal(c).add_(-1).pow(2).sum()
off_diag = off_diagonal(c).pow_(2).sum()
loss_val = on_diag + self.barlowtwins_lambd * off_diag
return loss_val
def triplet_loss(self, anchor_out, positive_out, negative_out, reduction="mean"):
positive_distance = 1 - self.cosine_similarity(anchor_out, positive_out)
negative_distance = 1 - self.cosine_similarity(anchor_out, negative_out)
losses = torch.max(
positive_distance - negative_distance + self.triplet_margin,
torch.full_like(positive_distance, 0),
)
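        # Cosine-distance triplet hinge: max(0, d(a, p) - d(a, n) + margin) with
        # d(x, y) = 1 - cos_sim(x, y), so positives must end up at least
        # `triplet_margin` closer to the anchor than negatives for zero loss.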
if reduction == "mean":
return torch.mean(losses)
else:
return torch.sum(losses)
def combine_loss(self, barlowtwins_loss, triplet_loss):
return barlowtwins_loss * self.lambd + triplet_loss
| [
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.Dropout",
"torch.diagonal",
"torch.nn.Sequential",
"torch.full_like",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.nn.CosineSimilarity",
"torch.mean",
"torch.sum"
] | 1.10.2 | DigitalPhonetics/SpeechRepresentationFinetuning | 11d7130919888d0a27de61f5075e72f4a024673b |
1.6 | import math
import os
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from typing import Tuple
import numpy as np
import torch
from hivemind.compression.base import CompressionBase, CompressionInfo
from hivemind.proto import runtime_pb2
EXECUTOR = ThreadPoolExecutor(max_workers=int(os.environ.get("QUANTIZATION_THREADS", 128)))
class Quantization(CompressionBase, ABC):
codebook_dtype, indices_dtype = np.float32, np.uint8
@abstractmethod
def quantize(self, tensor: torch.Tensor, allow_inplace: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""Convert tensor into a pair of (indices, codebook)"""
...
def compress(self, tensor: torch.Tensor, info: CompressionInfo, allow_inplace: bool = False) -> runtime_pb2.Tensor:
quantized, codebook = self.quantize(tensor.detach(), allow_inplace=allow_inplace)
return runtime_pb2.Tensor(
compression=self.compression_type,
buffer=b"".join((np.int64(len(codebook)).tobytes(), codebook.tobytes(), quantized.tobytes())),
size=tensor.shape,
dtype=tensor.numpy().dtype.name,
requires_grad=tensor.requires_grad,
)
def extract(self, serialized_tensor: runtime_pb2.Tensor) -> torch.Tensor:
codebook_size = int(np.frombuffer(serialized_tensor.buffer, count=1, dtype=np.int64))
codebook = np.frombuffer(serialized_tensor.buffer, offset=8, count=codebook_size, dtype=self.codebook_dtype)
quantized = np.frombuffer(serialized_tensor.buffer, offset=8 + codebook.nbytes, dtype=self.indices_dtype)
quantized = torch.as_tensor(quantized, dtype=torch.int64).reshape(tuple(serialized_tensor.size))
codebook = torch.as_tensor(np.asarray(codebook, dtype=serialized_tensor.dtype))
return codebook[quantized]
def estimate_compression_ratio(self, info: CompressionInfo) -> float:
return self.n_bits / torch.finfo(info.descriptor.dtype).bits
@property
def n_bits(self):
return self.indices_dtype(1).itemsize * 8
@property
def n_bins(self):
return 2**self.n_bits
class Uniform8BitQuantization(Quantization):
RANGE_IN_SIGMAS: int = 6
compression_type = runtime_pb2.UNIFORM_8BIT
def quantize(self, tensor: torch.Tensor, allow_inplace: bool = False) -> Tuple[np.ndarray, np.ndarray]:
offset = self.n_bins // 2
shift = tensor.mean()
centered_tensor = tensor.sub_(shift) if allow_inplace else tensor - shift
std_unbiased = centered_tensor.norm() / math.sqrt(centered_tensor.numel() - 1)
scale = self.RANGE_IN_SIGMAS * std_unbiased / self.n_bins
quantized = torch.quantize_per_tensor(centered_tensor, scale, offset, torch.quint8).int_repr()
lookup = average_buckets(tensor, quantized, self.n_bins)
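        # The tensor is centered, RANGE_IN_SIGMAS = 6 standard deviations are spread
        # across the 256 uint8 bins, and `lookup` stores each bin's mean original value
        # as a 256-entry codebook; decompression is simply codebook[indices].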
return np.asarray(quantized, dtype=self.indices_dtype), np.asarray(lookup, dtype=self.codebook_dtype)
class Quantile8BitQuantization(Quantization):
compression_type = runtime_pb2.QUANTILE_8BIT
def quantize(self, tensor: torch.Tensor, allow_inplace: bool = False) -> Tuple[np.ndarray, np.ndarray]:
tensor = tensor.detach().float()
borders = torch.as_tensor(quantile_qq_approximation(tensor.numpy(), self.n_bins + 1)[1:-1])
quantized = torch.clamp_(torch.bucketize(tensor, borders), 0, self.n_bins - 1)
codebook = average_buckets(tensor, quantized, self.n_bins)
return quantized.numpy().astype(np.uint8), codebook.numpy()
def average_buckets(tensor: torch.Tensor, quant_weight: torch.Tensor, n_bins: int):
"""Return the average value in each bucket"""
bin_sums = torch.zeros(n_bins).scatter_add_(0, quant_weight.flatten().long(), tensor.flatten())
bin_counts = torch.clamp_min_(torch.bincount(quant_weight.flatten(), minlength=n_bins), 1)
lookup = bin_sums / bin_counts
return lookup
def get_chunk_size(num_elements: int, min_chunk_size: int) -> int:
"""Adjust chunk_size to minimize imbalance between chunk sizes"""
if min_chunk_size >= num_elements:
return min_chunk_size
leftover_elements = num_elements % min_chunk_size
num_chunks = num_elements // min_chunk_size
return min_chunk_size + (leftover_elements - 1) // num_chunks + 1
def quantile_qq_approximation(array: np.ndarray, n_quantiles: int, min_chunk_size: int = 10**5) -> np.ndarray:
"""Estimate uniform quantiles of data using quantile-of-quantiles. Runs in parallel."""
if not array.data.c_contiguous and array.data.f_contiguous:
array = array.T
array = np.ascontiguousarray(array.reshape(-1))
quantiles = np.linspace(0.0, 1.0, num=n_quantiles, dtype=array.dtype)
chunk_size = get_chunk_size(len(array), min_chunk_size)
num_chunks = (len(array) - 1) // chunk_size + 1
partition_quantiles = np.empty((num_chunks, len(quantiles)), dtype=array.dtype)
jobs = []
for i in range(num_chunks):
chunk = slice(chunk_size * i, chunk_size * (i + 1))
jobs.append(EXECUTOR.submit(np.quantile, array[chunk], quantiles, out=partition_quantiles[i]))
for job in jobs:
job.result()
return np.quantile(partition_quantiles, quantiles)
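# Illustrative call (values are hypothetical, not part of the original module):
# per-chunk quantiles are computed in parallel, then quantiles of those partial
# results approximate np.quantile over the full array without one global sort.
# >>> x = np.random.randn(1_000_000).astype(np.float32)
# >>> quantile_qq_approximation(x, n_quantiles=257).shape
# (257,)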
| [
"torch.zeros",
"torch.bucketize",
"torch.finfo",
"torch.as_tensor",
"torch.quantize_per_tensor"
] | 1.6.0 | artek0chumak/hivemind | c6b2b2d84ccfc890314a2bfece8eef238372d410 |
1.6 | from __future__ import annotations
import logging
import os
import time
from functools import partial
from typing import Callable, Optional, Sequence, Union
import torch
from hivemind.averaging.control import AveragingStage, StepControl
from hivemind.compression import CompressionBase, NoCompression
from hivemind.dht import DHT
from hivemind.optim.grad_averager import GradientAverager
from hivemind.optim.grad_scaler import GradScaler
from hivemind.optim.progress_tracker import LocalTrainingProgress, ProgressTracker
from hivemind.optim.state_averager import (
LRSchedulerBase,
OptimizerFactory,
Parameters,
ParamGroups,
SchedulerFactory,
TorchOptimizer,
TrainingStateAverager,
)
from hivemind.utils import PerformanceEMA, get_dht_time, get_logger
logger = get_logger(__name__)
class Optimizer(torch.optim.Optimizer):
"""
hivemind.Optimizer wraps your regular PyTorch Optimizer for training collaboratively with peers.
By default, Optimizer is configured to be exactly **equivalent to synchronous training** with target_batch_size.
    There are advanced options that make training semi-asynchronous (delay_optimizer_step and delay_grad_averaging)
or even fully asynchronous (use_local_updates=True).
:example: The Optimizer can be used as a drop-in replacement for a regular PyTorch Optimizer:
>>> model = transformers.AutoModel("albert-xxlarge-v2")
>>> dht = hivemind.DHT(initial_peers=INITIAL_PEERS, start=True)
>>> opt = hivemind.Optimizer(dht=dht, run_id="run_42", batch_size_per_step=4, target_batch_size=4096,
>>> params=model.parameters(), optimizer=lambda params: torch.optim.Adam(params))
>>> while True:
>>> loss = compute_loss_on_batch(model, batch_size=4)
>>> opt.zero_grad()
>>> loss.backward()
>>> opt.step() # <-- train collaboratively with any peers that use the same prefix (run_42)
By default, peers will perform the following steps:
* accumulate a minibatch of gradients towards the (global) target batch size, without updating parameters yet;
* after peers collectively accumulate target_batch_size, average gradients with peers and perform optimizer step;
* if your peer lags behind the rest of the swarm, it will download parameters and optimizer state from others;
Unlike regular training, your device may join midway through training, when other peers already made some progress.
For this reason, any learning rate schedulers, curriculum and other **time-dependent features should be based on**
    ``optimizer.local_epoch`` (and not the number of calls to opt.step). Otherwise, peers that joined training late
    may end up having different learning rates. To do so automatically, specify the ``scheduler=...`` parameter below.
:What is an epoch?: Optimizer uses the term ``epoch`` to describe intervals between synchronizations. One epoch
      corresponds to processing a certain number of training samples (``target_batch_size``) in total across all peers.
Like in PyTorch LR Scheduler, **epoch does not necessarily correspond to a full pass over the training data.**
At the end of epoch, peers perform synchronous actions such as averaging gradients for a global optimizer update,
updating the learning rate scheduler or simply averaging parameters (if using local updates).
The purpose of this is to ensure that changing the number of peers does not require changing hyperparameters.
For instance, if the number of peers doubles, they will run all-reduce more frequently to adjust for faster training.
:Configuration guide: This guide will help you set up your first collaborative training run. It covers the most
important basic options, but ignores features that require significant changes to the training code.
>>> dht = hivemind.DHT(initial_peers=INITIAL_PEERS, client_mode=IF_BEHIND_FIREWALL_OR_VERY_UNRELIABLE, start=True)
>>> opt = hivemind.Optimizer(
>>> dht=dht, run_id="a_unique_name_that_every_participant_will_see_when_training",
>>> batch_size_per_step=ACTUAL_BATCH_SIZE_OF_THIS_PEER, target_batch_size=LARGE_GLOBAL_BATCH,
    >>> # ^--- Each global optimizer step will use gradients from 1x-1.1x of target_batch_size (due to latency);
>>> # It is recommended to train with very large batch sizes to reduce the % of time spent on communication.
>>>
>>> params=params, optimizer=lambda params: AnyPyTorchOptimizer(params, **hyperparams_for_target_batch_size),
>>> # tune learning rate for your target_batch_size. Here's a good reference: https://arxiv.org/abs/1904.00962
>>> scheduler=lambda opt: AnyPyTorchScheduler(opt, **hyperparams_for_target_batch_size),
>>> # scheduler.step will be called automatically each time when peers collectively accumulate target_batch_size
>>>
>>> offload_optimizer=True, # saves GPU memory, but increases RAM usage; Generally a good practice to use this.
>>> delay_grad_averaging=OPTIONAL, delay_optimizer_step=OPTIONAL, # train faster, but with 1 round of staleness;
>>> # setting both to True is equivalent to Delayed Parameter Updates (see https://arxiv.org/abs/2101.06840)
>>>
>>> grad_compression=hivemind.Float16Compression(), state_averaging_compression=hivemind.Float16Compression(),
>>> # ^-- it is usually fine to use pure 16-bit or even lower precision during communication with no precaution;
    >>> # See hivemind/examples/albert for a working example of mixed 8/16-bit compression.
>>>
>>> matchmaking_time=15.0, # 3-5s for small local runs, 10-15s for training over the internet or with many peers
    >>> averaging_timeout=60.0, # around 2x the actual time it takes to run all-reduce
>>> verbose=True # periodically report the training progress to the console (e.g. "Averaged with N peers")
>>> ) # and you're done!
:param dht: a running hivemind.DHT instance connected to other peers.
:param run_id: a unique identifier of this training run, used as a common prefix for all DHT keys.
**Note:** peers with the same run_id should *generally* train the same model and use compatible configurations.
Some options can be safely changed by individual peers: ``batch_size_per_step``, ``client_mode``, ``auxiliary``,
``reuse_grad_buffers``, ``offload_optimizer``, and ``verbose``. In some cases, other options may also be tuned
individually by each peer, but they should be changed with caution to avoid deadlocks or convergence issues.
:param target_batch_size: global batch size that must be accumulated before the swarm transitions to the next epoch.
      The actual batch may be *slightly* larger due to asynchrony (e.g. peers submit more gradients in the last second).
:param batch_size_per_step: you should accumulate gradients over this many samples between calls to optimizer.step.
:param params: parameters or param groups for the optimizer; required if optimizer is a callable(params).
:param optimizer: a callable(parameters) -> pytorch.optim.Optimizer or a pre-initialized PyTorch optimizer.
      **Note:** some advanced options like offload_optimizer, delay_optimizer_step, or delay_grad_averaging
      require the callable form and will not work if hivemind.Optimizer is created with a pre-existing PyTorch Optimizer.
:param scheduler: callable(optimizer) -> PyTorch LRScheduler or a pre-initialized PyTorch scheduler.
The learning rate scheduler will adjust learning rate based on global epoch, not the number of
local calls to optimizer.step; this is required to keep different peers synchronized.
:param matchmaking_time: when looking for group, wait for peers to join for up to this many seconds.
      Increase it if you see "averaged gradients with N peers" where N is below 0.9x the real swarm size on >=25% of epochs.
      When training over a low-latency network, decreasing matchmaking_time allows training with smaller batch sizes.
:param averaging_timeout: if an averaging step hangs for this long, it will be cancelled automatically.
Increase averaging_timeout if you see "Proceeding with local gradients" at least 25% of the time.
Do not set this timeout too high, as it may cause your optimizer to hang after some types of network errors.
:param allreduce_timeout: timeout for a single attempt to run all-reduce, default: equal to averaging_timeout.
:param load_state_timeout: wait for at most this many seconds before giving up on load_state_from_peers.
:param reuse_grad_buffers: if True, use model's .grad buffers for gradient accumulation.
This is more memory efficient, but it requires that the user does *NOT* call model/opt zero_grad at all
:param offload_optimizer: offload the optimizer to host memory, saving GPU memory for parameters and gradients
:param delay_optimizer_step: run optimizer in background, apply results in future .step; requires offload_optimizer
:param delay_grad_averaging: average gradients in background; requires offload_optimizer and delay_optimizer_step
:param delay_state_averaging: if enabled (default), average parameters and extra tensors in a background thread;
if set to False, average parameters synchronously within the corresponding hivemind.Optimizer.step call.
:param average_state_every: average state (parameters, chosen opt tensors) with peers every this many **epochs**.
      Increasing this value reduces the communication overhead, but can cause parameters to diverge if it is too large.
The maximal average_state_every=num_epochs depends on how often peers diverge from each other. If peers
hardly ever skip averaging rounds, they can average state less frequently. In turn, network failures, lossy
gradient compression and local_updates cause parameters to diverge faster and requires more frequent averaging.
:param use_local_updates: if enabled, peers will update parameters on each .step using local gradients;
if not enabled (default), accumulate gradients to target_batch_size, and then call .step with averaged gradients.
Even if use_local_updates=True, learning rate scheduler will still be called once per target_batch_size.
:param client_mode: if True, this peer will not accept incoming connections (firewall-compatible mode)
:param auxiliary: if True, optimizer.step will only assist other peers in averaging (for cpu-only workers)
:param grad_compression: compression strategy used for averaging gradients, default = no compression
:param state_averaging_compression: compression for averaging params and state tensors, default = no compression
:param load_state_compression: compression strategy for loading state from peers, default = no compression
:param average_opt_statistics: names of optimizer statistics from state dict that should be averaged with peers
:param extra_tensors: if specified, these extra tensors will also be averaged and shared in load_state_from_peers.
:param averager_opts: additional keyword arguments forwarded to both GradientAverager and TrainingStateAverager
:param tracker_opts: additional keyword arguments forwarded to ProgressTracker
:param performance_ema_alpha: moving average alpha in ProgressTracker, TrainingStateAverager and Optimizer
    :param verbose: if True, report internal events such as accumulating gradients and running background tasks
:note: in a large-scale training, peers will inevitably fail and you will see error messages. hivemind.Optimizer
is designed to recover from such failures, but will sometimes need a minute or two to re-adjust.
"""
def __init__(
self,
*,
dht: DHT,
run_id: str,
target_batch_size: int,
batch_size_per_step: Optional[int] = None,
optimizer: Union[TorchOptimizer, OptimizerFactory],
params: Optional[Union[Parameters, ParamGroups]] = None,
scheduler: Optional[Union[LRSchedulerBase, SchedulerFactory]] = None,
matchmaking_time: Optional[float] = 15.0,
averaging_timeout: Optional[float] = 60.0,
allreduce_timeout: Optional[float] = None,
next_chunk_timeout: Optional[float] = None,
load_state_timeout: float = 600.0,
reuse_grad_buffers: bool = False,
offload_optimizer: Optional[bool] = None,
delay_optimizer_step: Optional[bool] = None,
delay_grad_averaging: bool = False,
delay_state_averaging: bool = True,
average_state_every: int = 1,
use_local_updates: bool = False,
client_mode: bool = None,
auxiliary: bool = False,
grad_compression: CompressionBase = NoCompression(),
state_averaging_compression: CompressionBase = NoCompression(),
load_state_compression: CompressionBase = NoCompression(),
average_opt_statistics: Sequence[str] = (),
extra_tensors: Sequence[torch.Tensor] = (),
averager_opts: Optional[dict] = None,
tracker_opts: Optional[dict] = None,
performance_ema_alpha: float = 0.1,
shutdown_timeout: float = 5,
verbose: bool = False,
):
self._parent_pid = os.getpid()
        client_mode = client_mode if client_mode is not None else dht.client_mode
delay_optimizer_step = delay_optimizer_step if delay_optimizer_step is not None else delay_grad_averaging
offload_optimizer = offload_optimizer if offload_optimizer is not None else (params is not None)
allreduce_timeout = allreduce_timeout if allreduce_timeout is not None else averaging_timeout
next_chunk_timeout = next_chunk_timeout if next_chunk_timeout is not None else matchmaking_time
assert not delay_grad_averaging or delay_optimizer_step, "delay_grad_averaging requires delay_optimizer_step"
assert not (client_mode and auxiliary), "Client-mode peers cannot serve as auxiliaries"
assert not auxiliary or batch_size_per_step is None, "Auxiliary peers should not accumulate batches"
if callable(optimizer) and params is not None:
if scheduler is not None and (not callable(scheduler) or isinstance(scheduler, LRSchedulerBase)):
raise ValueError("For this mode, please provide scheduler factory: callable(optimizer) -> scheduler")
elif all(hasattr(optimizer, attr) for attr in ("param_groups", "step", "zero_grad")):
if offload_optimizer or delay_optimizer_step or delay_grad_averaging:
raise ValueError(
"To enable offload_optimizer or delayed updates, please initialize Optimizer as "
"hivemind.Optimizer(..., params=params, optimizer=lambda params: create_opt(params)"
)
else:
raise ValueError(
"Please initialize the optimizer in one of the following two ways:\n"
"(A) hivemind.Optimizer(..., params=params, optimizer=lambda params: create_opt(params)\n"
"(B) hivemind.Optimizer(..., optimizer=pre_initialize_optimizer)"
)
if use_local_updates:
assert not reuse_grad_buffers, "if local_updates is True, gradients will not be accumulated"
assert not delay_grad_averaging, "if local_updates is True, gradients will not be averaged"
self.dht, self.run_id, self.client_mode, self.auxiliary = dht, run_id, client_mode, auxiliary
self.batch_size_per_step, self.target_batch_size = batch_size_per_step, target_batch_size
self.delay_state_averaging, self.average_state_every = delay_state_averaging, average_state_every
self.matchmaking_time, self.offload_optimizer = matchmaking_time, offload_optimizer
self.delay_grad_averaging, self.delay_optimizer_step = delay_grad_averaging, delay_optimizer_step
self.averaging_timeout, self.allreduce_timeout = averaging_timeout, allreduce_timeout
self.load_state_timeout, self.shutdown_timeout = load_state_timeout, shutdown_timeout
self.next_chunk_timeout = next_chunk_timeout
self.status_loglevel = logging.INFO if verbose else logging.DEBUG
self.scheduled_grads: Optional[StepControl] = None
self.scheduled_state: Optional[StepControl] = None
self.tracker = self._make_progress_tracker(
target_batch_size, performance_ema_alpha=performance_ema_alpha, **tracker_opts or {}
)
self.state_averager = self._make_state_averager(
optimizer=optimizer,
params=params,
scheduler=scheduler,
delta_rule_averaging=use_local_updates and self.delay_state_averaging,
compression=state_averaging_compression,
state_compression=load_state_compression,
average_opt_statistics=average_opt_statistics,
performance_ema_alpha=performance_ema_alpha,
extra_tensors=extra_tensors,
**averager_opts or {},
)
if not use_local_updates:
self.grad_averager = self._make_gradient_averager(
reuse_grad_buffers=reuse_grad_buffers, compression=grad_compression, **averager_opts or {}
)
else:
self.grad_averager = None
self._should_check_synchronization_on_update = True # used in self.should_load_state_from_peers
self._schema_hash = self._compute_schema_hash()
self.delay_before_state_averaging = PerformanceEMA(alpha=performance_ema_alpha)
# measures the average time from the beginning of self._update_global_epoch to the call to state_averager
# used for pre-scheduling the averaging round in state_averager
self._step_supports_amp_scaling = reuse_grad_buffers
# note: the line above is used by pytorch AMP GradScaler to enable custom behavior needed when reusing gradient
# buffers over multiple steps (to avoid repeated unscaling). Without reuse_grad_buffers, this is not needed.
def _make_state_averager(self, **kwargs) -> TrainingStateAverager:
return TrainingStateAverager(
dht=self.dht,
prefix=f"{self.run_id}_state_averager",
min_matchmaking_time=self.matchmaking_time,
allreduce_timeout=self.allreduce_timeout,
shutdown_timeout=self.shutdown_timeout,
offload_optimizer=self.offload_optimizer,
custom_gradients=self.offload_optimizer,
status_loglevel=self.status_loglevel,
next_chunk_timeout=self.next_chunk_timeout,
client_mode=self.client_mode,
auxiliary=self.auxiliary,
start=True,
**kwargs,
)
def _make_gradient_averager(self, **kwargs) -> GradientAverager:
assert hasattr(self, "state_averager"), "must initialize state averager first"
grad_averager = GradientAverager(
dht=self.dht,
prefix=f"{self.run_id}_grad_averager",
parameters=self.state_averager.main_parameters,
min_matchmaking_time=self.matchmaking_time,
allreduce_timeout=self.allreduce_timeout,
shutdown_timeout=self.shutdown_timeout,
next_chunk_timeout=self.next_chunk_timeout,
client_mode=self.client_mode,
auxiliary=self.auxiliary,
start=True,
**kwargs,
)
if self.offload_optimizer:
optimized_param_groups = self.state_averager.optimizer.param_groups
optimized_parameters = [param for group in optimized_param_groups for param in group["params"]]
with grad_averager.get_tensors() as averaged_gradients:
assert len(averaged_gradients) == len(optimized_parameters)
for opt_param, averaged_grad in zip(optimized_parameters, averaged_gradients):
opt_param.grad = averaged_grad
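                # Aliasing each optimizer parameter's .grad to the averager's shared buffer
                # lets the offloaded (CPU) optimizer consume averaged gradients in place,
                # without an extra copy after every all-reduce round.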
return grad_averager
def _make_progress_tracker(self, target_batch_size: int, **kwargs) -> ProgressTracker:
return ProgressTracker(
dht=self.dht,
prefix=self.run_id,
target_batch_size=target_batch_size,
client_mode=self.client_mode,
status_loglevel=self.status_loglevel,
start=True,
**kwargs,
)
def _compute_schema_hash(self) -> int:
optimized_param_groups = self.state_averager.optimizer.param_groups
optimized_parameters = [param for group in optimized_param_groups for param in group["params"]]
param_shapes = tuple(tuple(param.shape) for param in optimized_parameters)
# offloaded optimizer requires that gradient tensors are reused between iterations
grad_ids = tuple(id(param.grad) for param in optimized_parameters) if self.offload_optimizer else None
return hash((grad_ids, param_shapes))
def is_alive(self) -> bool:
return self.state_averager.is_alive()
@property
def local_epoch(self) -> int:
"""
This worker's current epoch, kept synchronized with peers. If peer's local_epoch lags behind others, it will
automatically re-synchronize by downloading state from another peer.
An epoch corresponds to accumulating target_batch_size across all active devices.
"""
return self.state_averager.local_epoch
@property
def local_progress(self) -> LocalTrainingProgress:
return self.tracker.local_progress
@property
def use_local_updates(self) -> bool:
return self.grad_averager is None
@property
def use_gradient_averaging(self) -> bool:
return self.grad_averager is not None
def step(
self,
closure: Optional[Callable[[], torch.Tensor]] = None,
batch_size: Optional[int] = None,
grad_scaler: Optional[GradScaler] = None,
):
"""
Update training progress after accumulating another local batch size. Depending on the configuration, this will
report progress to peers, run global or local optimizer step, average parameters or schedule background tasks.
:param closure: A closure that reevaluates the model and returns the loss.
:param batch_size: optional override for batch_size_per_step from init.
:param grad_scaler: if amp is enabled, this **must** be a hivemind-aware gradient scaler.
:note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.
"""
if grad_scaler is not None and not isinstance(grad_scaler, GradScaler):
raise ValueError("hivemind.Optimizer requires a hivemind-aware gradient scaler (hivemind.GradScaler)")
if self.batch_size_per_step is None and batch_size is None and not self.auxiliary:
raise ValueError("Please either set batch_size_per_step parameter at init or when calling .step")
if self.auxiliary and (closure is not None or batch_size is not None or grad_scaler is not None):
raise ValueError("Auxiliary peers should not have batch size, run closures, or use grad_scaler")
batch_size = batch_size if batch_size is not None else self.batch_size_per_step
# if delayed updates finished before step, apply these updates; otherwise do nothing
self.state_averager.step(apply_delayed_updates=True)
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
if not self.auxiliary and self._should_load_state_from_peers():
logger.log(self.status_loglevel, "Peer is out of sync")
self.load_state_from_peers()
return loss # local gradients were computed with out-of-sync parameters, must start over
if self.use_gradient_averaging:
# accumulate gradients toward target batch size, then aggregate with peers and run optimizer
if not self.auxiliary:
grads_are_valid = self._check_and_accumulate_gradients(batch_size, grad_scaler)
if not grads_are_valid:
return loss # local gradients were reset due to overflow, must start over
self._maybe_schedule_gradient_averaging()
self._maybe_schedule_state_averaging()
else:
# use_local_updates=True: update parameters on every step independently of other peers
if not self.auxiliary:
if grad_scaler is not None:
with grad_scaler.running_global_step():
assert grad_scaler.unscale_(self)
new_samples_accumulated = self.tracker.local_progress.samples_accumulated + batch_size
self.tracker.report_local_progress(self.local_epoch, new_samples_accumulated)
self._maybe_schedule_state_averaging()
self.state_averager.step(
increment_epoch=False,
optimizer_step=True,
delay_optimizer_step=self.delay_optimizer_step,
grad_scaler=grad_scaler,
)
if self.tracker.ready_to_update_epoch:
self._update_global_epoch(grad_scaler)
return loss
def _update_global_epoch(self, grad_scaler: Optional[GradScaler]) -> None:
"""Depending on the configuration: aggregate gradients and/or parameters, perform global optimizer step"""
assert self._schema_hash == self._compute_schema_hash(), "parameters or gradients changed during iteration"
_epoch_start_time = time.perf_counter()
with self.tracker.pause_updates():
wait_for_trigger = None
if self.use_gradient_averaging:
logger.log(self.status_loglevel, f"Beginning optimizer step #{self.local_epoch}")
if self.delay_optimizer_step:
self.state_averager.step(wait_for_delayed_updates=True)
began_averaging_gradients = self._begin_averaging_gradients(grad_scaler)
if not began_averaging_gradients:
# failed to start gradient averaging due to an internal error
self.grad_averager.load_accumulators_into_averager_()
elif self.delay_grad_averaging:
                    # if using delayed grad averaging, send this to state_averager as a pre-condition for optimizer step
wait_for_trigger = partial(self._average_gradients_and_load_into_optimizer, self.scheduled_grads)
else:
# delay_grad_averaging=False, average gradients immediately
self._average_gradients_and_load_into_optimizer(self.scheduled_grads)
next_epoch = max(self.local_epoch + 1, self.tracker.global_epoch)
swarm_not_empty = self.tracker.global_progress.num_peers > 1
should_perform_optimizer_step = not self.auxiliary and not self.use_local_updates
should_average_state = (
swarm_not_empty
and next_epoch % self.average_state_every == 0
and not self.state_averager.averaging_in_progress
)
if should_average_state and self.scheduled_state is not None:
if self.scheduled_state.triggered or self.scheduled_state.done():
logger.log(
                        self.status_loglevel,
                        f"Not using pre-scheduled group for state averaging because it "
                        f"was already used elsewhere: {self.scheduled_state}",
)
self.scheduled_state = None
self.delay_before_state_averaging.update(task_size=1, interval=time.perf_counter() - _epoch_start_time)
self.state_averager.step(
increment_epoch=True,
wait_for_trigger=wait_for_trigger,
optimizer_step=should_perform_optimizer_step,
delay_optimizer_step=self.delay_optimizer_step and should_perform_optimizer_step,
grad_scaler=grad_scaler,
averaging_round=should_average_state,
delay_averaging=self.delay_state_averaging and not self.auxiliary,
averaging_control=self.scheduled_state if should_average_state else None,
averaging_opts=dict(timeout=self.averaging_timeout) if should_average_state else None,
)
if not should_average_state and self.scheduled_state is not None and not self.scheduled_state.done():
self.scheduled_state.cancel()
self.scheduled_state = None
self.tracker.update_epoch(new_epoch=self.state_averager.local_epoch)
self._should_check_synchronization_on_update = True
# the above line ensures that peers check for *strict* synchronization once per epoch
if not self.client_mode:
self.state_averager.state_sharing_priority = self.local_epoch
if self.use_gradient_averaging and not self.auxiliary:
self.grad_averager.reset_accumulated_grads_()
if not self.client_mode:
self.grad_averager.state_sharing_priority = self.local_epoch
logger.log(self.status_loglevel, f"Transitioning to epoch {self.local_epoch}")
def _begin_averaging_gradients(self, grad_scaler: Optional[GradScaler]) -> bool:
"""Begin an all-reduce round to average gradients; return True if succeeded, False if failed"""
if grad_scaler is not None:
with grad_scaler.running_global_step():
assert grad_scaler.unscale_(self)
began_averaging_gradients = False
if self.scheduled_grads is not None and (self.scheduled_grads.triggered or self.scheduled_grads.done()):
logger.log(
                self.status_loglevel,
                f"Not using pre-scheduled group for gradient averaging because it "
                f"was already used elsewhere: {self.scheduled_grads}",
)
self.scheduled_grads = None
elif self.tracker.global_progress.num_peers > 1:
try:
self.scheduled_grads = self.grad_averager.step(
control=self.scheduled_grads, reset_accumulators=True, wait=False
)
began_averaging_gradients = True
except BaseException as e:
logger.exception(e)
if not began_averaging_gradients and self.scheduled_grads is not None and not self.scheduled_grads.done():
if self.tracker.global_progress.num_peers > 1:
logger.log(self.status_loglevel, f"Tagging along for a pre-scheduled gradient averaging round")
self._tag_along_with_zero_weight(self.scheduled_grads)
else:
logger.log(self.status_loglevel, f"Skipping pre-scheduled averaging round: there are no other peers")
self._load_local_gradients_into_optimizer()
self.scheduled_grads.cancel()
self.scheduled_grads = None
return began_averaging_gradients
def _check_and_accumulate_gradients(self, batch_size: int, grad_scaler: Optional[GradScaler]) -> bool:
"""Check if gradients are valid, accumulate and return True; otherwise, reset and return False"""
assert not self.use_local_updates and not self.auxiliary
if grad_scaler is not None and not grad_scaler.are_grads_finite(self):
logger.log(self.status_loglevel, "Encountered incorrect value in fp16 grads, resetting local gradients")
self.tracker.report_local_progress(self.local_epoch, samples_accumulated=0)
self.grad_averager.reset_accumulated_grads_()
return False
self.grad_averager.accumulate_grads_(batch_size)
self.tracker.report_local_progress(self.local_epoch, self.grad_averager.local_samples_accumulated)
return True
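    # Pre-scheduling (below) launches matchmaking for the upcoming all-reduce while local
    # gradients are still being accumulated, so a peer group is already formed by the time
    # the swarm reaches target_batch_size and the epoch transition is triggered.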
def _maybe_schedule_gradient_averaging(self) -> None:
"""If next epoch is coming soon, schedule the next gradient averaging round at the estimated end of epoch"""
assert self.use_gradient_averaging
if self.tracker.estimated_next_update_time - get_dht_time() <= self.matchmaking_time:
if self.scheduled_grads is None or self.scheduled_grads.triggered or self.scheduled_grads.done():
eta_seconds = self.tracker.estimated_next_update_time - get_dht_time()
eta_seconds = max(eta_seconds, self.grad_averager.matchmaking_kwargs["min_matchmaking_time"])
logger.log(self.status_loglevel, f"Pre-scheduling gradient averaging round in {eta_seconds:.2f} sec")
self.scheduled_grads = self.grad_averager.schedule_step(timeout=self.averaging_timeout)
def _maybe_schedule_state_averaging(self) -> None:
"""If next epoch is coming soon, schedule the next state averaging at estimated parameter averaging start"""
next_epoch = max(self.local_epoch + 1, self.tracker.global_epoch)
if next_epoch % self.average_state_every != 0:
return # averaging is not performed at this epoch
if self.state_averager.averaging_in_progress:
return # previous run is still in progress
if self.delay_before_state_averaging.num_updates == 0:
return # not enough data to accurately pre-schedule
estimated_time = self.tracker.estimated_next_update_time
estimated_time += self.delay_before_state_averaging.ema_seconds_per_sample
estimated_time += self.state_averager.delay_before_averaging.ema_seconds_per_sample
eta_seconds_to_averaging = estimated_time - get_dht_time()
if eta_seconds_to_averaging <= self.matchmaking_time:
if self.scheduled_state is None or self.scheduled_state.triggered or self.scheduled_state.done():
min_matchmaking_time = self.state_averager.matchmaking_kwargs["min_matchmaking_time"]
actual_seconds = max(eta_seconds_to_averaging, min_matchmaking_time)
logger.log(self.status_loglevel, f"Pre-scheduling state averaging round in {actual_seconds:.2f} sec")
self.scheduled_state = self.state_averager.schedule_step(
gather=next_epoch, timeout=self.averaging_timeout
)
def _average_gradients_and_load_into_optimizer(self, maybe_step_control: Optional[StepControl]):
"""Run gradient averaging; on success, feed averaged gradients into optimizer; else, use local gradients"""
        assert self.use_gradient_averaging and (maybe_step_control is None or maybe_step_control.triggered)
averaged_gradients = False
try:
if maybe_step_control is not None:
group_info = maybe_step_control.result(self.averaging_timeout)
logger.log(self.status_loglevel, f"Averaged gradients with {len(group_info)} peers")
self._load_averaged_gradients_into_optimizer_()
averaged_gradients = True
else:
logger.log(self.status_loglevel, f"Skipped averaging: there are no other peers")
except BaseException as e:
logger.log(self.status_loglevel, f"Averaging gradients failed with {repr(e)}")
if not averaged_gradients:
self._load_local_gradients_into_optimizer()
def _load_averaged_gradients_into_optimizer_(self):
"""If required, load averaged gradients into optimizer; otherwise simply notify grad averager"""
assert self.use_gradient_averaging
if self.offload_optimizer:
pass # averaged gradients are already baked into optimizer, see _make_gradient_averager
else:
# copy averaged gradients into optimizer .grad buffers
optimized_param_groups = self.state_averager.optimizer.param_groups
optimized_parameters = [param for group in optimized_param_groups for param in group["params"]]
with torch.no_grad(), self.grad_averager.get_tensors() as averaged_gradients:
assert len(averaged_gradients) == len(optimized_parameters)
for opt_param, averaged_grad in zip(optimized_parameters, averaged_gradients):
opt_param.grad.copy_(averaged_grad, non_blocking=True)
self.grad_averager.notify_used_averaged_gradients()
def _load_local_gradients_into_optimizer(self):
"""Fallback to using local gradients in the optimizer (instead of averaged gradients)"""
logger.log(self.status_loglevel, f"Proceeding with local gradients")
self.grad_averager.load_accumulators_into_averager_()
# note: we load gradients into grad_averager even though there is only one peer because of two reasons:
# - if offload_optimizer, then we must load gradients onto the CPU gradient buffers used by the optimizer
# - if not offload_optimizer, we must un-scale gradients (divide them by the number of accumulation steps)
self._load_averaged_gradients_into_optimizer_()
def zero_grad(self, set_to_none: bool = False):
"""Reset gradients from model. If reuse_grad_buffers=True, this will raise an error."""
if self.use_gradient_averaging and self.grad_averager.reuse_grad_buffers:
raise ValueError(
f"When running {self.__class__.__name__} with reuse_grad_buffers=True, user should never "
f"call zero_grad manually. Gradients will be refreshed internally"
)
for param_group in self.param_groups:
for param in param_group["params"]:
if param.grad is None:
pass
elif set_to_none:
param.grad = None
else:
param.grad.zero_()
def _should_load_state_from_peers(self) -> bool:
"""
If true, peer will discard local progress and attempt to download state from peers.
This method allows peer to continue training in two cases:
- peer is on the same epoch as other collaborators - keep training normally
- peer was on the same epoch and accumulated some grads, but some collaborators
have just transitioned to the next epoch - this peer should also transition.
:note: The latter case occurs due to the lack of network synchrony: the first peer that
detects enough samples will transition to the next step and start counting samples anew.
Some other peers may take time before they check with DHT and observe that
- the global epoch is technically one epoch ahead of the current one and
- the remaining (non-transitioned) peers no longer have target_batch_size between them
If this is the case, peer should transition to the next epoch and does *not* need to re-load state.
"""
if self._should_check_synchronization_on_update and self.tracker.fetched_global_progress_this_epoch.is_set():
self._should_check_synchronization_on_update = False
return self.local_epoch != self.tracker.global_epoch # require exact synchronization once per step
return self.local_epoch < self.tracker.global_epoch - 1 # catch up if a peer just switched to next epoch
def is_synchronized_with_peers(self) -> bool:
"""Checks whether the current peer is up-to-date with others in terms of the epoch (step) number."""
return self.local_epoch >= self.tracker.global_epoch - 1
def load_state_from_peers(self, **kwargs):
"""
Attempt to load the newest collaboration state from other peers within the same run_id.
If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place.
"""
# note: we tag along for the next all-reduce because the run may have already started and cancelling it
# will cause peers to restart matchmaking and may stall the entire collaboration for a few seconds.
if self.scheduled_grads is not None and not self.scheduled_grads.done():
self._tag_along_with_zero_weight(self.scheduled_grads)
self.scheduled_grads = None
self.state_averager.step(wait_for_delayed_updates=True)
with self.tracker.pause_updates():
while True:
try:
self.state_averager.load_state_from_peers(timeout=self.load_state_timeout, **kwargs)
break
except KeyboardInterrupt:
raise
except BaseException as e:
logger.exception(f"Failed to load state from peers: {e}, retrying ...")
continue
if self.tracker.global_epoch - 1 <= self.local_epoch < self.tracker.global_epoch:
logger.log(self.status_loglevel, f"Catching up with collaboration step {self.tracker.global_epoch}")
self.state_averager.local_epoch = self.tracker.global_epoch
self.tracker.report_local_progress(local_epoch=self.local_epoch, samples_accumulated=0)
if not self.client_mode:
self.state_averager.state_sharing_priority = self.local_epoch
if self.use_gradient_averaging:
self.grad_averager.reset_accumulated_grads_()
if not self.client_mode:
self.grad_averager.state_sharing_priority = self.local_epoch
def state_dict(self) -> dict:
state_dict = self.state_averager.optimizer.state_dict()
state_dict["state"]["local_epoch"] = self.local_epoch
return state_dict
def load_state_dict(self, state_dict: dict):
if "local_epoch" in state_dict["state"]:
self.state_averager.local_epoch = state_dict["state"].pop("local_epoch")
return self.state_averager.optimizer.load_state_dict(state_dict)
@property
def state(self):
return dict(self.state_averager.optimizer.state, local_epoch=self.local_epoch)
@property
def opt(self) -> TorchOptimizer:
return self.state_averager.optimizer
@property
def param_groups(self) -> ParamGroups:
next_index = 0
param_groups = tuple(dict(param_group) for param_group in self.state_averager.optimizer.param_groups)
for param_group in param_groups:
num_params = len(param_group["params"])
main_params_for_group = self.state_averager.main_parameters[next_index : next_index + num_params]
param_group["params"] = main_params_for_group
next_index += num_params
assert next_index == len(self.state_averager.main_parameters)
return param_groups
def add_param_group(self, param_group: dict) -> None:
raise ValueError(
f"{self.__class__.__name__} does not support calling add_param_group after creation. "
f"Please provide all parameter groups at init"
)
def __repr__(self):
return f"{self.__class__.__name__}(prefix={self.run_id}, epoch={self.local_epoch})"
def _tag_along_with_zero_weight(self, control: StepControl):
"""Wait for a running averaging round to finish with zero weight."""
if not control.triggered:
control.weight = 0
control.allow_allreduce()
if not control.done():
try:
control.result(self.averaging_timeout)
except BaseException as e:
logger.exception(e)
if not control.done():
control.cancel()
def shutdown(self):
logger.log(self.status_loglevel, "Sending goodbye to peers...")
self.tracker.shutdown(self.shutdown_timeout)
self.state_averager.step(wait_for_delayed_updates=True)
for scheduled_round in self.scheduled_grads, self.scheduled_state:
if scheduled_round is not None:
if scheduled_round.stage == AveragingStage.LOOKING_FOR_GROUP:
scheduled_round.cancel()
else:
self._tag_along_with_zero_weight(scheduled_round)
logger.log(self.status_loglevel, "Shutting down averagers...")
self.state_averager.shutdown()
if self.use_gradient_averaging:
self.grad_averager.shutdown()
logger.log(self.status_loglevel, f"{self.__class__.__name__} is shut down")
def __del__(self):
if self._parent_pid == os.getpid() and self.is_alive():
self.shutdown()
| [
"torch.enable_grad",
"torch.no_grad"
] | 1.6.0 | artek0chumak/hivemind | 762f116ffcd6c194b888ed64c8a82033cc97dce7 |
0.4 | from os import path
import torch
import torch.utils.data as data
class CacheClassLabel(data.Dataset):
"""
A dataset wrapper that has a quick access to all labels of data.
"""
def __init__(self, dataset):
super(CacheClassLabel, self).__init__()
self.dataset = dataset
self.labels = torch.LongTensor(len(dataset)).fill_(-1)
label_cache_filename = path.join(dataset.root, dataset.__module__+'_'+str(len(dataset))+'.pth')
if path.exists(label_cache_filename):
self.labels = torch.load(label_cache_filename)
else:
for i, data in enumerate(dataset):
self.labels[i] = data[1]
torch.save(self.labels, label_cache_filename)
self.number_classes = len(torch.unique(self.labels))
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
img,target = self.dataset[index]
return img, target
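# Hypothetical usage of CacheClassLabel (the torchvision dataset is illustrative,
# not part of this repo):
# >>> base = torchvision.datasets.MNIST(root="data", train=True, download=True)
# >>> wrapped = CacheClassLabel(base)  # scans the labels once and caches them to a .pth file under base.root
# >>> wrapped.number_classes
# 10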
class AppendName(data.Dataset):
"""
    A dataset wrapper that also returns the name of the dataset/task
"""
def __init__(self, dataset, name, first_class_ind=0):
super(AppendName,self).__init__()
self.dataset = dataset
self.name = name
self.first_class_ind = first_class_ind # For remapping the class index
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
img,target = self.dataset[index]
target = target + self.first_class_ind
return img, target, self.name
class Subclass(data.Dataset):
"""
    A dataset wrapper that keeps only the given classes and removes the label offset (labels start from 0)
"""
def __init__(self, dataset, class_list, remap=True):
'''
:param dataset: (CacheClassLabel)
:param class_list: (list) A list of integers
:param remap: (bool) Ex: remap class [2,4,6 ...] to [0,1,2 ...]
'''
super(Subclass,self).__init__()
assert isinstance(dataset, CacheClassLabel), 'dataset must be wrapped by CacheClassLabel'
self.dataset = dataset
self.class_list = class_list
self.remap = remap
self.indices = []
for c in class_list:
self.indices.extend((dataset.labels==c).nonzero().flatten().tolist())
if remap:
self.class_mapping = {c: i for i, c in enumerate(class_list)}
def __len__(self):
return len(self.indices)
def __getitem__(self, index):
img,target = self.dataset[self.indices[index]]
if self.remap:
raw_target = target.item() if isinstance(target,torch.Tensor) else target
target = self.class_mapping[raw_target]
return img, target
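# Example: Subclass(wrapped, class_list=[2, 4, 6]) keeps only samples whose cached label
# is 2, 4 or 6 and, with remap=True, relabels them as 0, 1 and 2 respectively.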
class Permutation(data.Dataset):
"""
    A dataset wrapper that permutes the positions of the features
"""
def __init__(self, dataset, permute_idx):
super(Permutation,self).__init__()
self.dataset = dataset
self.permute_idx = permute_idx
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
img,target = self.dataset[index]
shape = img.size()
img = img.view(-1)[self.permute_idx].view(shape)
return img, target
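# Example (permuted-MNIST style, permutation value is illustrative): with a fixed
# permute_idx = torch.randperm(28 * 28) per task, every image is flattened, its pixels
# reordered by permute_idx, and reshaped back to the original size.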
class Storage(data.Subset):
def reduce(self, m):
self.indices = self.indices[:m]
| [
"torch.save",
"torch.unique",
"torch.load"
] | 0.4.1 | parvex/residual-continual-learning-benchmark | 8eeb2e57ecf0711e075eb02e8ed06fc8e7b9f20d |