version (string · 24 values) | code (string · 396–135k chars) | apis (sequence) | full_version (string · 1–6 chars) | repo_name (string · 6–64 chars) | hexsha (string · 40 chars)
---|---|---|---|---|---
1.4 | """
Base loss definitions
"""
from collections import OrderedDict
import copy
import torch
import torch.nn as nn
from mixmo.utils import misc, logger
LOGGER = logger.get_logger(__name__, level="DEBUG")
class AbstractLoss(nn.modules.loss._Loss):
"""
    Base loss class defining printing and logging utilities
"""
def __init__(self, config_args, device, config_loss=None):
self.device = device
self.config_args = config_args or {}
self.config_loss = config_loss or {}
self.name = self.config_loss["display_name"]
nn.modules.loss._Loss.__init__(self)
def print_details(self):
LOGGER.info(f"Using loss: {self.config_loss} with name: {self.name}")
def start_accumulator(self):
self._accumulator_loss = 0
self._accumulator_len = 0
def get_accumulator_stats(self, format="short", split=None):
"""
Gather tracked stats into a dictionary as formatted strings
"""
if not self._accumulator_len:
return {}
stats = OrderedDict({})
loss_value = self._accumulator_loss / self._accumulator_len
if format == "long":
assert split is not None
key = split + "/" + self.name
stats[key] = {
"value": loss_value,
"string": f"{loss_value:.5}",
}
else:
            # make it as short as possible to fit on one line of tqdm postfix
loss_string = f"{loss_value:.3}".replace("e-0", "-").replace("e-", "-")
stats[self.name] = loss_string
return stats
def forward(self, input, target):
current_loss = self._forward(input, target)
self._accumulator_loss += current_loss.detach().to("cpu").numpy()
self._accumulator_len += 1
return current_loss
def _forward(self, input, target):
raise NotImplementedError
class SoftCrossEntropyLoss(AbstractLoss):
"""
Soft CrossEntropy loss that specifies the proper forward function for AbstractLoss
"""
def _forward(self, input, target):
"""
Cross entropy that accepts soft targets
Args:
            input: raw (pre-softmax) predictions from the neural network
            target: targets, can be soft (class probabilities) or hard (class indices)
        Examples::
            input = torch.FloatTensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]])
            input = torch.autograd.Variable(input, requires_grad=True)
            target = torch.FloatTensor([[0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])
            target = torch.autograd.Variable(target)
loss = cross_entropy(input, target)
loss.backward()
"""
if len(target.size()) == 1:
target = torch.nn.functional.one_hot(target, num_classes=input.size(-1))
target = target.to(torch.float).to(self.device)
logsoftmax = torch.nn.LogSoftmax(dim=1)
return torch.mean(torch.sum(-target * logsoftmax(input), dim=1))
DICT_LOSS_STANDARD = {
"soft_cross_entropy": SoftCrossEntropyLoss,
}
class WrapperLoss(AbstractLoss):
"""
Wrapper around the multiple losses. Initialized from listloss.
"""
def __init__(self, config_loss, config_args, device):
AbstractLoss.__init__(
self,
config_args=config_args,
config_loss=config_loss,
device=device,
)
self.losses = self._init_get_losses()
self.regularized_network = None
def _init_get_losses(self):
"""
Initialize and gather losses from listloss
"""
losses = []
for ic, config_loss in enumerate(self.config_loss["listloss"]):
if config_loss["coeff"] == "<num_members":
config_loss["coeff"] = (1. if ic < self.config_args["num_members"] else 0)
if config_loss["coeff"] == 0:
LOGGER.debug(f"Skip loss: {config_loss}")
continue
loss_callable = get_loss(config_loss, device=self.device, config_args=self.config_args)
loss = copy.deepcopy(config_loss)
loss["callable"] = loss_callable
losses.append(loss)
return losses
def print_details(self):
return
def start_accumulator(self):
AbstractLoss.start_accumulator(self)
for loss in self.losses:
loss["callable"].start_accumulator()
def get_accumulator_stats(self, format="short", split=None):
"""
Gather tracked stats into a dictionary as formatted strings
"""
if not self._accumulator_len:
return {}
stats = AbstractLoss.get_accumulator_stats(self, format=format, split=split)
if format == "long":
# tensorboard logs
if self.config_loss.get("l2_reg"):
l2_reg = self.l2_reg().detach().to("cpu").numpy()
stats["general/l2_reg"] = {
"value": l2_reg,
"string": f"{l2_reg:.4}",
}
for loss in self.losses:
substats = loss["callable"].get_accumulator_stats(
format=format,
split=split,
)
misc.clean_update(stats, substats)
return stats
def _forward(self, input, target):
"""
        Perform the forward pass for each subloss and add l2 regularization
"""
computed_losses = [self._forward_subloss(loss, input, target) for loss in self.losses]
stacked_computed_losses = torch.stack(computed_losses)
final_loss = stacked_computed_losses.sum()
if self.config_loss.get("l2_reg"):
final_loss = final_loss + self.l2_reg() * float(self.config_loss.get("l2_reg"))
return final_loss
def _forward_subloss(self, loss, input, target):
"""
Standard loss forward for one of the sublosses
"""
coeff = float(loss["coeff"])
subloss_input = self._match_item(loss["input"], dict_tensors=input)
subloss_target = self._match_item(loss["target"], dict_tensors=target)
loss = loss["callable"](input=subloss_input, target=subloss_target)
return loss * coeff
@staticmethod
def _match_item(name, dict_tensors):
if misc.is_none(name):
return None
if name in dict_tensors:
return dict_tensors[str(name)]
raise ValueError(name)
def set_regularized_network(self, network):
if self.config_loss.get("l2_reg"):
self.regularized_network = network
LOGGER.warning(f"Set l2 regularization on {network.__class__.__name__}")
def l2_reg(self,):
"""
Compute l2 regularization/weight decay over the non-excluded parameters
"""
assert self.regularized_network is not None
# Retrieve non excluded parameters
params = list(self.regularized_network.parameters())
# Iterate over all parameters to decay
l2_reg = None
for W in params:
if l2_reg is None:
l2_reg = torch.sum(torch.pow(W, 2))
else:
l2_reg = l2_reg + torch.sum(torch.pow(W, 2))
assert l2_reg is not None
return l2_reg
def get_loss(config_loss, device=None, config_args=None):
"""
Construct loss object, wrapped if there are multiple losses
"""
loss_name = config_loss["name"]
if loss_name == "multitask":
loss = WrapperLoss(config_args=config_args, device=device, config_loss=config_loss)
elif loss_name in DICT_LOSS_STANDARD:
loss = DICT_LOSS_STANDARD[loss_name](
config_loss=config_loss, config_args=config_args, device=device
)
else:
raise Exception(f"Loss {loss_name} not implemented")
loss.print_details()
return loss
| [
"torch.nn.LogSoftmax",
"torch.nn.modules.loss._Loss.__init__",
"torch.stack",
"torch.pow"
] | 1.4.0 | JiarunLiu/mixmo-pytorch | a9ad674122d9b6512094b8292280a4045bb5a400 |
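For orientation, a minimal usage sketch of the loss factory defined above. It assumes the module's definitions are importable as-is; the config values are illustrative, not taken from the repo's actual config files:

import torch

# "name"/"display_name" are the keys get_loss and AbstractLoss read above;
# the values here are assumptions for illustration
config_loss = {"name": "soft_cross_entropy", "display_name": "ce"}
loss_fn = get_loss(config_loss, device="cpu", config_args={})
loss_fn.start_accumulator()
logits = torch.randn(4, 10, requires_grad=True)  # 4 samples, 10 classes
labels = torch.randint(0, 10, (4,))              # hard labels; one-hot encoded internally
loss = loss_fn(input=logits, target=labels)
loss.backward()
print(loss_fn.get_accumulator_stats(format="short"))  # e.g. OrderedDict([('ce', '2.41')])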
1.4 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import warnings
import torch
import pyro
import pyro.poutine as poutine
from pyro.ops.stats import fit_generalized_pareto
from .abstract_infer import TracePosterior
from .enum import get_importance_trace
class Importance(TracePosterior):
"""
:param model: probabilistic model defined as a function
:param guide: guide used for sampling defined as a function
:param num_samples: number of samples to draw from the guide (default 10)
This method performs posterior inference by importance sampling
using the guide as the proposal distribution.
If no guide is provided, it defaults to proposing from the model's prior.
"""
def __init__(self, model, guide=None, num_samples=None):
"""
        Constructor. Defaults to num_samples = 10 and guide = model.
"""
super().__init__()
if num_samples is None:
num_samples = 10
warnings.warn("num_samples not provided, defaulting to {}".format(num_samples))
if guide is None:
# propose from the prior by making a guide from the model by hiding observes
guide = poutine.block(model, hide_types=["observe"])
self.num_samples = num_samples
self.model = model
self.guide = guide
def _traces(self, *args, **kwargs):
"""
Generator of weighted samples from the proposal distribution.
"""
for i in range(self.num_samples):
guide_trace = poutine.trace(self.guide).get_trace(*args, **kwargs)
model_trace = poutine.trace(
poutine.replay(self.model, trace=guide_trace)).get_trace(*args, **kwargs)
log_weight = model_trace.log_prob_sum() - guide_trace.log_prob_sum()
yield (model_trace, log_weight)
def get_log_normalizer(self):
"""
Estimator of the normalizing constant of the target distribution.
(mean of the unnormalized weights)
"""
# ensure list is not empty
if self.log_weights:
log_w = torch.tensor(self.log_weights)
log_num_samples = torch.log(torch.tensor(self.num_samples * 1.))
return torch.logsumexp(log_w - log_num_samples, 0)
else:
warnings.warn("The log_weights list is empty, can not compute normalizing constant estimate.")
def get_normalized_weights(self, log_scale=False):
"""
Compute the normalized importance weights.
"""
if self.log_weights:
log_w = torch.tensor(self.log_weights)
log_w_norm = log_w - torch.logsumexp(log_w, 0)
return log_w_norm if log_scale else torch.exp(log_w_norm)
else:
warnings.warn("The log_weights list is empty. There is nothing to normalize.")
def get_ESS(self):
"""
Compute (Importance Sampling) Effective Sample Size (ESS).
"""
if self.log_weights:
log_w_norm = self.get_normalized_weights(log_scale=True)
ess = torch.exp(-torch.logsumexp(2*log_w_norm, 0))
else:
warnings.warn("The log_weights list is empty, effective sample size is zero.")
ess = 0
return ess
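# A hedged usage sketch of the class above (the model body and observed value are
# illustrative assumptions; ``.run()`` is inherited from TracePosterior):
#
#     import pyro.distributions as dist
#
#     def model():
#         mu = pyro.sample("mu", dist.Normal(0., 1.))
#         pyro.sample("obs", dist.Normal(mu, 1.), obs=torch.tensor(0.5))
#
#     posterior = Importance(model, guide=None, num_samples=100).run()
#     print(posterior.get_ESS(), posterior.get_log_normalizer())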
def vectorized_importance_weights(model, guide, *args, **kwargs):
"""
:param model: probabilistic model defined as a function
:param guide: guide used for sampling defined as a function
:param num_samples: number of samples to draw from the guide (default 1)
:param int max_plate_nesting: Bound on max number of nested :func:`pyro.plate` contexts.
:param bool normalized: set to True to return self-normalized importance weights
:returns: returns a ``(num_samples,)``-shaped tensor of importance weights
and the model and guide traces that produced them
Vectorized computation of importance weights for models with static structure::
log_weights, model_trace, guide_trace = \\
vectorized_importance_weights(model, guide, *args,
num_samples=1000,
max_plate_nesting=4,
normalized=False)
"""
num_samples = kwargs.pop("num_samples", 1)
max_plate_nesting = kwargs.pop("max_plate_nesting", None)
normalized = kwargs.pop("normalized", False)
if max_plate_nesting is None:
raise ValueError("must provide max_plate_nesting")
max_plate_nesting += 1
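    # reserve one extra dim, to the left of all model/guide plates, for the
    # "num_particles_vectorized" plate introduced below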
def vectorize(fn):
def _fn(*args, **kwargs):
with pyro.plate("num_particles_vectorized", num_samples, dim=-max_plate_nesting):
return fn(*args, **kwargs)
return _fn
model_trace, guide_trace = get_importance_trace(
"flat", max_plate_nesting, vectorize(model), vectorize(guide), args, kwargs)
guide_trace.pack_tensors()
model_trace.pack_tensors(guide_trace.plate_to_symbol)
if num_samples == 1:
log_weights = model_trace.log_prob_sum() - guide_trace.log_prob_sum()
else:
wd = guide_trace.plate_to_symbol["num_particles_vectorized"]
log_weights = 0.
for site in model_trace.nodes.values():
if site["type"] != "sample":
continue
log_weights += torch.einsum(site["packed"]["log_prob"]._pyro_dims + "->" + wd,
[site["packed"]["log_prob"]])
for site in guide_trace.nodes.values():
if site["type"] != "sample":
continue
log_weights -= torch.einsum(site["packed"]["log_prob"]._pyro_dims + "->" + wd,
[site["packed"]["log_prob"]])
if normalized:
        log_weights = log_weights - torch.logsumexp(log_weights, 0)
return log_weights, model_trace, guide_trace
@torch.no_grad()
def psis_diagnostic(model, guide, *args, **kwargs):
"""
Computes the Pareto tail index k for a model/guide pair using the technique
described in [1], which builds on previous work in [2]. If :math:`0 < k < 0.5`
the guide is a good approximation to the model posterior, in the sense
described in [1]. If :math:`0.5 \\le k \\le 0.7`, the guide provides a suboptimal
approximation to the posterior, but may still be useful in practice. If
:math:`k > 0.7` the guide program provides a poor approximation to the full
posterior, and caution should be used when using the guide. Note, however,
that a guide may be a poor fit to the full posterior while still yielding
reasonable model predictions. If :math:`k < 0.0` the importance weights
corresponding to the model and guide appear to be bounded from above; this
would be a bizarre outcome for a guide trained via ELBO maximization. Please
see [1] for a more complete discussion of how the tail index k should be
interpreted.
Please be advised that a large number of samples may be required for an
accurate estimate of k.
Note that we assume that the model and guide are both vectorized and have
static structure. As is canonical in Pyro, the args and kwargs are passed
to the model and guide.
References
[1] 'Yes, but Did It Work?: Evaluating Variational Inference.'
Yuling Yao, Aki Vehtari, Daniel Simpson, Andrew Gelman
[2] 'Pareto Smoothed Importance Sampling.'
Aki Vehtari, Andrew Gelman, Jonah Gabry
:param callable model: the model program.
:param callable guide: the guide program.
:param int num_particles: the total number of times we run the model and guide in
order to compute the diagnostic. defaults to 1000.
:param max_simultaneous_particles: the maximum number of simultaneous samples drawn
from the model and guide. defaults to `num_particles`. `num_particles` must be
        divisible by `max_simultaneous_particles`.
:param int max_plate_nesting: optional bound on max number of nested :func:`pyro.plate`
contexts in the model/guide. defaults to 7.
:returns float: the PSIS diagnostic k
"""
num_particles = kwargs.pop('num_particles', 1000)
max_simultaneous_particles = kwargs.pop('max_simultaneous_particles', num_particles)
max_plate_nesting = kwargs.pop('max_plate_nesting', 7)
if num_particles % max_simultaneous_particles != 0:
raise ValueError("num_particles must be divisible by max_simultaneous_particles.")
N = num_particles // max_simultaneous_particles
log_weights = [vectorized_importance_weights(model, guide, num_samples=max_simultaneous_particles,
max_plate_nesting=max_plate_nesting,
*args, **kwargs)[0] for _ in range(N)]
log_weights = torch.cat(log_weights)
log_weights -= log_weights.max()
log_weights = torch.sort(log_weights, descending=False)[0]
cutoff_index = - int(math.ceil(min(0.2 * num_particles, 3.0 * math.sqrt(num_particles)))) - 1
lw_cutoff = max(math.log(1.0e-15), log_weights[cutoff_index])
lw_tail = log_weights[log_weights > lw_cutoff]
if len(lw_tail) < 10:
warnings.warn("Not enough tail samples to compute PSIS diagnostic; increase num_particles.")
k = float('inf')
else:
k, _ = fit_generalized_pareto(lw_tail.exp() - math.exp(lw_cutoff))
return k
| [
"torch.cat",
"torch.einsum",
"torch.no_grad",
"torch.logsumexp",
"torch.tensor",
"torch.exp",
"torch.sort"
] | 1.4.0 | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 |
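As a quick smoke test of the PSIS diagnostic above, here is a hedged sketch using a conjugate normal model and a simple hand-written guide; every name below (toy_model, toy_guide, the chosen sizes) is an illustrative assumption, and the import path for psis_diagnostic is assumed to be the module shown above:

import torch
import pyro
import pyro.distributions as dist

def toy_model():
    z = pyro.sample("z", dist.Normal(0., 1.))
    pyro.sample("x", dist.Normal(z, 1.), obs=torch.tensor(1.0))

def toy_guide():
    # the true posterior is Normal(0.5, sqrt(0.5)); this guide is close but not exact
    pyro.sample("z", dist.Normal(torch.tensor(0.5), torch.tensor(0.8)))

k = psis_diagnostic(toy_model, toy_guide, num_particles=2000, max_plate_nesting=0)
print("PSIS k-hat:", k)  # k < 0.5 suggests a good guide, k > 0.7 a poor one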
1.4 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from pyro.distributions import Categorical
from pyro.distributions.torch_distribution import TorchDistributionMixin
from pyro.ops.indexing import Vindex
from pyro.util import ignore_jit_warnings
from .messenger import Messenger
from .runtime import _ENUM_ALLOCATOR
def _tmc_mixture_sample(msg):
dist, num_samples = msg["fn"], msg["infer"].get("num_samples")
# find batch dims that aren't plate dims
batch_shape = [1] * len(dist.batch_shape)
for f in msg["cond_indep_stack"]:
if f.vectorized:
batch_shape[f.dim] = f.size if f.size > 0 else dist.batch_shape[f.dim]
batch_shape = tuple(batch_shape)
# sample a batch
sample_shape = (num_samples,)
fat_sample = dist(sample_shape=sample_shape) # TODO thin before sampling
assert fat_sample.shape == sample_shape + dist.batch_shape + dist.event_shape
assert any(d > 1 for d in fat_sample.shape)
target_shape = (num_samples,) + batch_shape + dist.event_shape
# if this site has any possible ancestors, sample ancestor indices uniformly
thin_sample = fat_sample
if thin_sample.shape != target_shape:
index = [Ellipsis] + [slice(None)] * (len(thin_sample.shape) - 1)
squashed_dims = []
for squashed_dim, squashed_size in zip(range(1, len(thin_sample.shape)), thin_sample.shape[1:]):
if squashed_size > 1 and (target_shape[squashed_dim] == 1 or squashed_dim == 0):
# uniformly sample one ancestor per upstream particle population
ancestor_dist = Categorical(logits=torch.zeros((squashed_size,), device=thin_sample.device))
ancestor_index = ancestor_dist.sample(sample_shape=(num_samples,))
index[squashed_dim] = ancestor_index
squashed_dims.append(squashed_dim)
thin_sample = Vindex(thin_sample)[tuple(index)]
for squashed_dim in squashed_dims:
thin_sample = thin_sample.unsqueeze(squashed_dim)
assert thin_sample.shape == target_shape
return thin_sample
def _tmc_diagonal_sample(msg):
dist, num_samples = msg["fn"], msg["infer"].get("num_samples")
# find batch dims that aren't plate dims
batch_shape = [1] * len(dist.batch_shape)
for f in msg["cond_indep_stack"]:
if f.vectorized:
batch_shape[f.dim] = f.size if f.size > 0 else dist.batch_shape[f.dim]
batch_shape = tuple(batch_shape)
# sample a batch
sample_shape = (num_samples,)
fat_sample = dist(sample_shape=sample_shape) # TODO thin before sampling
assert fat_sample.shape == sample_shape + dist.batch_shape + dist.event_shape
assert any(d > 1 for d in fat_sample.shape)
target_shape = (num_samples,) + batch_shape + dist.event_shape
# if this site has any ancestors, choose ancestors from diagonal approximation
thin_sample = fat_sample
if thin_sample.shape != target_shape:
index = [Ellipsis] + [slice(None)] * (len(thin_sample.shape) - 1)
squashed_dims = []
for squashed_dim, squashed_size in zip(range(1, len(thin_sample.shape)), thin_sample.shape[1:]):
if squashed_size > 1 and (target_shape[squashed_dim] == 1 or squashed_dim == 0):
# diagonal approximation: identify particle indices across populations
ancestor_index = torch.arange(squashed_size, device=thin_sample.device)
index[squashed_dim] = ancestor_index
squashed_dims.append(squashed_dim)
thin_sample = Vindex(thin_sample)[tuple(index)]
for squashed_dim in squashed_dims:
thin_sample = thin_sample.unsqueeze(squashed_dim)
assert thin_sample.shape == target_shape
return thin_sample
def enumerate_site(msg):
dist = msg["fn"]
num_samples = msg["infer"].get("num_samples", None)
if num_samples is None:
# Enumerate over the support of the distribution.
value = dist.enumerate_support(expand=msg["infer"].get("expand", False))
elif num_samples > 1 and not msg["infer"].get("expand", False):
tmc_strategy = msg["infer"].get("tmc", "diagonal")
if tmc_strategy == "mixture":
value = _tmc_mixture_sample(msg)
elif tmc_strategy == "diagonal":
value = _tmc_diagonal_sample(msg)
else:
raise ValueError("{} not a valid TMC strategy".format(tmc_strategy))
elif num_samples > 1 and msg["infer"]["expand"]:
# Monte Carlo sample the distribution.
value = dist(sample_shape=(num_samples,))
assert value.dim() == 1 + len(dist.batch_shape) + len(dist.event_shape)
return value
class EnumMessenger(Messenger):
"""
Enumerates in parallel over discrete sample sites marked
``infer={"enumerate": "parallel"}``.
:param int first_available_dim: The first tensor dimension (counting
from the right) that is available for parallel enumeration. This
dimension and all dimensions left may be used internally by Pyro.
This should be a negative integer or None.
"""
def __init__(self, first_available_dim=None):
assert first_available_dim is None or first_available_dim < 0, first_available_dim
self.first_available_dim = first_available_dim
super().__init__()
def __enter__(self):
if self.first_available_dim is not None:
_ENUM_ALLOCATOR.set_first_available_dim(self.first_available_dim)
self._markov_depths = {} # site name -> depth (nonnegative integer)
self._param_dims = {} # site name -> (enum dim -> unique id)
self._value_dims = {} # site name -> (enum dim -> unique id)
return super().__enter__()
@ignore_jit_warnings()
def _pyro_sample(self, msg):
"""
:param msg: current message at a trace site.
:returns: a sample from the stochastic function at the site.
"""
if msg["done"] or not isinstance(msg["fn"], TorchDistributionMixin):
return
# Compute upstream dims in scope; these are unsafe to use for this site's target_dim.
scope = msg["infer"].get("_markov_scope") # site name -> markov depth
param_dims = _ENUM_ALLOCATOR.dim_to_id.copy() # enum dim -> unique id
if scope is not None:
for name, depth in scope.items():
if self._markov_depths[name] == depth: # hide sites whose markov context has exited
param_dims.update(self._value_dims[name])
self._markov_depths[msg["name"]] = msg["infer"]["_markov_depth"]
self._param_dims[msg["name"]] = param_dims
if msg["is_observed"] or msg["infer"].get("enumerate") != "parallel":
return
# Compute an enumerated value (at an arbitrary dim).
value = enumerate_site(msg)
actual_dim = -1 - len(msg["fn"].batch_shape) # the leftmost dim of log_prob
# Move actual_dim to a safe target_dim.
target_dim, id_ = _ENUM_ALLOCATOR.allocate(None if scope is None else param_dims)
event_dim = msg["fn"].event_dim
categorical_support = getattr(value, '_pyro_categorical_support', None)
if categorical_support is not None:
# Preserve categorical supports to speed up Categorical.log_prob().
# See pyro/distributions/torch.py for details.
assert target_dim < 0
value = value.reshape(value.shape[:1] + (1,) * (-1 - target_dim))
value._pyro_categorical_support = categorical_support
elif actual_dim < target_dim:
assert value.size(target_dim - event_dim) == 1, \
'pyro.markov dim conflict at dim {}'.format(actual_dim)
value = value.transpose(target_dim - event_dim, actual_dim - event_dim)
while value.dim() and value.size(0) == 1:
value = value.squeeze(0)
elif target_dim < actual_dim:
diff = actual_dim - target_dim
value = value.reshape(value.shape[:1] + (1,) * diff + value.shape[1:])
# Compute dims passed downstream through the value.
value_dims = {dim: param_dims[dim] for dim in range(event_dim - value.dim(), 0)
if value.size(dim - event_dim) > 1 and dim in param_dims}
value_dims[target_dim] = id_
msg["infer"]["_enumerate_dim"] = target_dim
msg["infer"]["_dim_to_id"] = value_dims
msg["value"] = value
msg["done"] = True
def _pyro_post_sample(self, msg):
# Save all dims exposed in this sample value.
# Whereas all of site["_dim_to_id"] are needed to interpret a
# site's log_prob tensor, only a filtered subset self._value_dims[msg["name"]]
# are needed to interpret a site's value.
if not isinstance(msg["fn"], TorchDistributionMixin):
return
value = msg["value"]
if value is None:
return
shape = value.shape[:value.dim() - msg["fn"].event_dim]
dim_to_id = msg["infer"].setdefault("_dim_to_id", {})
dim_to_id.update(self._param_dims.get(msg["name"], {}))
with ignore_jit_warnings():
self._value_dims[msg["name"]] = {dim: id_ for dim, id_ in dim_to_id.items()
if len(shape) >= -dim and shape[dim] > 1}
| [
"torch.zeros",
"torch.arange"
] | 1.4.0 | patrickeganfoley/pyro | 3bd5e099e85f3686c66fc3b53476c3b009a77a02 |
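A hedged sketch of the messenger above in action: a discrete site marked with infer={"enumerate": "parallel"} has its whole support evaluated in one vectorized pass instead of being sampled (the model below is an illustrative assumption, not taken from the pyro test suite):

import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine

def model():
    # 3-state discrete latent, enumerated rather than sampled
    z = pyro.sample("z", dist.Categorical(logits=torch.zeros(3)),
                    infer={"enumerate": "parallel"})
    pyro.sample("x", dist.Normal(z.float(), 1.), obs=torch.tensor(0.5))

enum_model = EnumMessenger(first_available_dim=-1)(model)
trace = poutine.trace(enum_model).get_trace()
print(trace.nodes["z"]["value"].shape)  # the enumerated support, e.g. torch.Size([3])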
1.0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""E2E-TTS training / decoding functions."""
import copy
import json
import logging
import math
import os
import time
import chainer
import kaldiio
import numpy as np
import torch
from chainer import training
from chainer.training import extensions
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import snapshot_object
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import torch_resume
from espnet.asr.asr_utils import torch_snapshot
from espnet.asr.pytorch_backend.asr_init import load_trained_modules
from espnet.nets.pytorch_backend.nets_utils import pad_list
from espnet.nets.tts_interface import TTSInterface
from espnet.utils.dataset import ChainerDataLoader
from espnet.utils.dataset import TransformDataset
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.evaluator import BaseEvaluator
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.training.train_utils import check_early_stop
from espnet.utils.training.train_utils import set_early_stop
from espnet.utils.training.iterators import ShufflingEnabler
import matplotlib
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from tensorboardX import SummaryWriter
matplotlib.use('Agg')
class CustomEvaluator(BaseEvaluator):
"""Custom evaluator."""
def __init__(self, model, iterator, target, device):
"""Initilize module.
Args:
model (torch.nn.Module): Pytorch model instance.
iterator (chainer.dataset.Iterator): Iterator for validation.
target (chainer.Chain): Dummy chain instance.
device (torch.device): The device to be used in evaluation.
"""
super(CustomEvaluator, self).__init__(iterator, target)
self.model = model
self.device = device
# The core part of the update routine can be customized by overriding.
def evaluate(self):
"""Evaluate over validation iterator."""
iterator = self._iterators['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
summary = chainer.reporter.DictSummary()
self.model.eval()
with torch.no_grad():
for batch in it:
if isinstance(batch, tuple):
x = tuple(arr.to(self.device) for arr in batch)
else:
x = batch
for key in x.keys():
x[key] = x[key].to(self.device)
observation = {}
with chainer.reporter.report_scope(observation):
# convert to torch tensor
if isinstance(x, tuple):
self.model(*x)
else:
self.model(**x)
summary.add(observation)
self.model.train()
return summary.compute_mean()
class CustomUpdater(training.StandardUpdater):
"""Custom updater."""
def __init__(self, model, grad_clip, iterator, optimizer, device, accum_grad=1):
"""Initilize module.
Args:
            model (torch.nn.Module): Pytorch model instance.
            grad_clip (float): The gradient clipping value.
iterator (chainer.dataset.Iterator): Iterator for training.
optimizer (torch.optim.Optimizer) : Pytorch optimizer instance.
device (torch.device): The device to be used in training.
"""
super(CustomUpdater, self).__init__(iterator, optimizer)
self.model = model
self.grad_clip = grad_clip
self.device = device
self.clip_grad_norm = torch.nn.utils.clip_grad_norm_
self.accum_grad = accum_grad
self.forward_count = 0
# The core part of the update routine can be customized by overriding.
def update_core(self):
"""Update model one step."""
# When we pass one iterator and optimizer to StandardUpdater.__init__,
# they are automatically named 'main'.
train_iter = self.get_iterator('main')
optimizer = self.get_optimizer('main')
# Get the next batch (a list of json files)
batch = train_iter.next()
if isinstance(batch, tuple):
x = tuple(arr.to(self.device) for arr in batch)
else:
x = batch
for key in x.keys():
x[key] = x[key].to(self.device)
# compute loss and gradient
if isinstance(x, tuple):
loss = self.model(*x).mean() / self.accum_grad
else:
loss = self.model(**x).mean() / self.accum_grad
loss.backward()
# update parameters
self.forward_count += 1
if self.forward_count != self.accum_grad:
return
self.forward_count = 0
# compute the gradient norm to check if it is normal or not
grad_norm = self.clip_grad_norm(self.model.parameters(), self.grad_clip)
logging.debug('grad norm={}'.format(grad_norm))
if math.isnan(grad_norm):
logging.warning('grad norm is nan. Do not update model.')
else:
optimizer.step()
optimizer.zero_grad()
def update(self):
"""Run update function."""
self.update_core()
if self.forward_count == 0:
self.iteration += 1
class CustomConverter(object):
"""Custom converter."""
def __init__(self):
"""Initilize module."""
# NOTE: keep as class for future development
pass
def __call__(self, batch, device=torch.device('cpu')):
"""Convert a given batch.
Args:
batch (list): List of ndarrays.
device (torch.device): The device to be send.
Returns:
dict: Dict of converted tensors.
Examples:
>>> batch = [([np.arange(5), np.arange(3)],
[np.random.randn(8, 2), np.random.randn(4, 2)],
None, None)]
>>> conveter = CustomConverter()
>>> conveter(batch, torch.device("cpu"))
{'xs': tensor([[0, 1, 2, 3, 4],
[0, 1, 2, 0, 0]]),
'ilens': tensor([5, 3]),
'ys': tensor([[[-0.4197, -1.1157],
[-1.5837, -0.4299],
[-2.0491, 0.9215],
[-2.4326, 0.8891],
[ 1.2323, 1.7388],
[-0.3228, 0.6656],
[-0.6025, 1.3693],
[-1.0778, 1.3447]],
[[ 0.1768, -0.3119],
[ 0.4386, 2.5354],
[-1.2181, -0.5918],
[-0.6858, -0.8843],
[ 0.0000, 0.0000],
[ 0.0000, 0.0000],
[ 0.0000, 0.0000],
[ 0.0000, 0.0000]]]),
'labels': tensor([[0., 0., 0., 0., 0., 0., 0., 1.],
[0., 0., 0., 1., 1., 1., 1., 1.]]),
'olens': tensor([8, 4])}
"""
# batch should be located in list
assert len(batch) == 1
xs, ys, spembs, extras = batch[0]
# get list of lengths (must be tensor for DataParallel)
ilens = torch.from_numpy(np.array([x.shape[0] for x in xs])).long().to(device)
olens = torch.from_numpy(np.array([y.shape[0] for y in ys])).long().to(device)
# perform padding and conversion to tensor
xs = pad_list([torch.from_numpy(x).long() for x in xs], 0).to(device)
ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)
# make labels for stop prediction
labels = ys.new_zeros(ys.size(0), ys.size(1))
for i, l in enumerate(olens):
labels[i, l - 1:] = 1.0
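        # e.g. olens = [8, 4] -> labels = [[0,0,0,0,0,0,0,1],
        #                                  [0,0,0,1,1,1,1,1]]
        # (the stop flag is 1 from each utterance's last frame onward)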
# prepare dict
new_batch = {
"xs": xs,
"ilens": ilens,
"ys": ys,
"labels": labels,
"olens": olens,
}
# load speaker embedding
if spembs is not None:
spembs = torch.from_numpy(np.array(spembs)).float()
new_batch["spembs"] = spembs.to(device)
# load second target
if extras is not None:
extras = pad_list([torch.from_numpy(extra).float() for extra in extras], 0)
new_batch["extras"] = extras.to(device)
return new_batch
def train(args):
"""Train E2E-TTS model."""
set_deterministic_pytorch(args)
# check cuda availability
if not torch.cuda.is_available():
logging.warning('cuda is not available')
# get input and output dimension info
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
utts = list(valid_json.keys())
# reverse input and output dimension
idim = int(valid_json[utts[0]]['output'][0]['shape'][1])
odim = int(valid_json[utts[0]]['input'][0]['shape'][1])
logging.info('#input dims : ' + str(idim))
logging.info('#output dims: ' + str(odim))
    # get extra input and output dimension
if args.use_speaker_embedding:
args.spk_embed_dim = int(valid_json[utts[0]]['input'][1]['shape'][0])
else:
args.spk_embed_dim = None
if args.use_second_target:
args.spc_dim = int(valid_json[utts[0]]['input'][1]['shape'][1])
else:
args.spc_dim = None
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + '/model.json'
with open(model_conf, 'wb') as f:
        logging.info('writing a model config file to ' + model_conf)
f.write(json.dumps((idim, odim, vars(args)),
indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
for key in sorted(vars(args).keys()):
logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))
# specify model architecture
if args.enc_init is not None or args.dec_init is not None:
model = load_trained_modules(idim, odim, args, TTSInterface)
else:
model_class = dynamic_import(args.model_module)
model = model_class(idim, odim, args)
assert isinstance(model, TTSInterface)
logging.info(model)
reporter = model.reporter
# check the use of multi-gpu
if args.ngpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.ngpu)))
if args.batch_size != 0:
logging.warning('batch size is automatically increased (%d -> %d)' % (
args.batch_size, args.batch_size * args.ngpu))
args.batch_size *= args.ngpu
# set torch device
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
model = model.to(device)
# Setup an optimizer
if args.opt == 'adam':
optimizer = torch.optim.Adam(
model.parameters(), args.lr, eps=args.eps,
weight_decay=args.weight_decay)
elif args.opt == 'noam':
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# FIXME: TOO DIRTY HACK
setattr(optimizer, 'target', reporter)
setattr(optimizer, 'serialize', lambda s: reporter.serialize(s))
# read json data
with open(args.train_json, 'rb') as f:
train_json = json.load(f)['utts']
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
if use_sortagrad:
args.batch_sort_key = "input"
# make minibatch list (variable length)
train_batchset = make_batchset(train_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
batch_sort_key=args.batch_sort_key,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
swap_io=True, iaxis=0, oaxis=0)
valid_batchset = make_batchset(valid_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
batch_sort_key=args.batch_sort_key,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
swap_io=True, iaxis=0, oaxis=0)
load_tr = LoadInputsAndTargets(
mode='tts',
use_speaker_embedding=args.use_speaker_embedding,
use_second_target=args.use_second_target,
preprocess_conf=args.preprocess_conf,
preprocess_args={'train': True}, # Switch the mode of preprocessing
keep_all_data_on_mem=args.keep_all_data_on_mem,
)
load_cv = LoadInputsAndTargets(
mode='tts',
use_speaker_embedding=args.use_speaker_embedding,
use_second_target=args.use_second_target,
preprocess_conf=args.preprocess_conf,
preprocess_args={'train': False}, # Switch the mode of preprocessing
keep_all_data_on_mem=args.keep_all_data_on_mem,
)
converter = CustomConverter()
    # hack to make the batch size argument 1
    # the actual batch size is contained in each list element
train_iter = {'main': ChainerDataLoader(
dataset=TransformDataset(train_batchset, lambda data: converter([load_tr(data)])),
batch_size=1, num_workers=args.num_iter_processes,
shuffle=not use_sortagrad, collate_fn=lambda x: x[0])}
valid_iter = {'main': ChainerDataLoader(
dataset=TransformDataset(valid_batchset, lambda data: converter([load_cv(data)])),
batch_size=1, shuffle=False, collate_fn=lambda x: x[0],
num_workers=args.num_iter_processes)}
# Set up a trainer
updater = CustomUpdater(model, args.grad_clip, train_iter, optimizer, device, args.accum_grad)
trainer = training.Trainer(updater, (args.epochs, 'epoch'), out=args.outdir)
# Resume from a snapshot
if args.resume:
logging.info('resumed from %s' % args.resume)
torch_resume(args.resume, trainer)
# set intervals
eval_interval = (args.eval_interval_epochs, 'epoch')
save_interval = (args.save_interval_epochs, 'epoch')
report_interval = (args.report_interval_iters, 'iteration')
# Evaluate the model with the test dataset for each epoch
trainer.extend(CustomEvaluator(
model, valid_iter, reporter, device), trigger=eval_interval)
# Save snapshot for each epoch
trainer.extend(torch_snapshot(), trigger=save_interval)
# Save best models
trainer.extend(snapshot_object(model, 'model.loss.best'),
trigger=training.triggers.MinValueTrigger(
'validation/main/loss', trigger=eval_interval))
# Save attention figure for each epoch
if args.num_save_attention > 0:
data = sorted(list(valid_json.items())[:args.num_save_attention],
key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn, data, args.outdir + '/att_ws',
converter=converter,
transform=load_cv,
device=device, reverse=True)
trainer.extend(att_reporter, trigger=eval_interval)
else:
att_reporter = None
# Make a plot for training and validation values
if hasattr(model, "module"):
base_plot_keys = model.module.base_plot_keys
else:
base_plot_keys = model.base_plot_keys
plot_keys = []
for key in base_plot_keys:
plot_key = ['main/' + key, 'validation/main/' + key]
trainer.extend(extensions.PlotReport(
plot_key, 'epoch', file_name=key + '.png'), trigger=eval_interval)
plot_keys += plot_key
trainer.extend(extensions.PlotReport(
plot_keys, 'epoch', file_name='all_loss.png'), trigger=eval_interval)
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport(trigger=report_interval))
report_keys = ['epoch', 'iteration', 'elapsed_time'] + plot_keys
trainer.extend(extensions.PrintReport(report_keys), trigger=report_interval)
trainer.extend(extensions.ProgressBar(), trigger=report_interval)
set_early_stop(trainer, args)
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
writer = SummaryWriter(args.tensorboard_dir)
trainer.extend(TensorboardLogger(writer, att_reporter), trigger=report_interval)
if use_sortagrad:
trainer.extend(ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, 'epoch'))
# Run the training
trainer.run()
check_early_stop(trainer, args.epochs)
@torch.no_grad()
def decode(args):
"""Decode with E2E-TTS model."""
set_deterministic_pytorch(args)
# read training config
idim, odim, train_args = get_model_conf(args.model, args.model_conf)
# show arguments
for key in sorted(vars(args).keys()):
logging.info('args: ' + key + ': ' + str(vars(args)[key]))
# define model
model_class = dynamic_import(train_args.model_module)
model = model_class(idim, odim, train_args)
assert isinstance(model, TTSInterface)
logging.info(model)
# load trained model parameters
logging.info('reading model parameters from ' + args.model)
torch_load(args.model, model)
model.eval()
# set torch device
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
model = model.to(device)
# read json data
with open(args.json, 'rb') as f:
js = json.load(f)['utts']
# check directory
outdir = os.path.dirname(args.out)
if len(outdir) != 0 and not os.path.exists(outdir):
os.makedirs(outdir)
load_inputs_and_targets = LoadInputsAndTargets(
mode='tts', load_input=False, sort_in_input_length=False,
use_speaker_embedding=train_args.use_speaker_embedding,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None else args.preprocess_conf,
preprocess_args={'train': False} # Switch the mode of preprocessing
)
# define function for plot prob and att_ws
def _plot_and_save(array, figname, figsize=(6, 4), dpi=150):
import matplotlib.pyplot as plt
shape = array.shape
if len(shape) == 1:
# for eos probability
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(array)
plt.xlabel("Frame")
plt.ylabel("Probability")
plt.ylim([0, 1])
elif len(shape) == 2:
# for tacotron 2 attention weights, whose shape is (out_length, in_length)
plt.figure(figsize=figsize, dpi=dpi)
plt.imshow(array, aspect="auto")
plt.xlabel("Input")
plt.ylabel("Output")
elif len(shape) == 4:
            # for transformer attention weights, whose shape is (#layers, #heads, out_length, in_length)
plt.figure(figsize=(figsize[0] * shape[0], figsize[1] * shape[1]), dpi=dpi)
for idx1, xs in enumerate(array):
for idx2, x in enumerate(xs, 1):
plt.subplot(shape[0], shape[1], idx1 * shape[1] + idx2)
plt.imshow(x, aspect="auto")
plt.xlabel("Input")
plt.ylabel("Output")
else:
raise NotImplementedError("Support only from 1D to 4D array.")
plt.tight_layout()
if not os.path.exists(os.path.dirname(figname)):
# NOTE: exist_ok = True is needed for parallel process decoding
os.makedirs(os.path.dirname(figname), exist_ok=True)
plt.savefig(figname)
plt.close()
# define function to calculate focus rate (see section 3.3 in https://arxiv.org/abs/1905.09263)
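    # focus rate F = (1 / L) * sum_l max_t a_{l,t}: the mean, over the L output
    # frames, of the largest attention weight each frame assigns to any input token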
    def _calculate_focus_rate(att_ws):
if att_ws is None:
# fastspeech case -> None
return 1.0
elif len(att_ws.shape) == 2:
# tacotron 2 case -> (L, T)
return float(att_ws.max(dim=-1)[0].mean())
elif len(att_ws.shape) == 4:
# transformer case -> (#layers, #heads, L, T)
return float(att_ws.max(dim=-1)[0].mean(dim=-1).max())
else:
raise ValueError("att_ws should be 2 or 4 dimensional tensor.")
# define function to convert attention to duration
def _convert_att_to_duration(att_ws):
if len(att_ws.shape) == 2:
# tacotron 2 case -> (L, T)
pass
elif len(att_ws.shape) == 4:
# transformer case -> (#layers, #heads, L, T)
# get the most diagonal head according to focus rate
att_ws = torch.cat([att_w for att_w in att_ws], dim=0) # (#heads * #layers, L, T)
diagonal_scores = att_ws.max(dim=-1)[0].mean(dim=-1) # (#heads * #layers,)
diagonal_head_idx = diagonal_scores.argmax()
att_ws = att_ws[diagonal_head_idx] # (L, T)
else:
raise ValueError("att_ws should be 2 or 4 dimensional tensor.")
# calculate duration from 2d attention weight
durations = torch.stack([att_ws.argmax(-1).eq(i).sum() for i in range(att_ws.shape[1])])
return durations.view(-1, 1).float()
# define writer instances
feat_writer = kaldiio.WriteHelper(
'ark,scp:{o}.ark,{o}.scp'.format(o=args.out))
if args.save_durations:
dur_writer = kaldiio.WriteHelper(
'ark,scp:{o}.ark,{o}.scp'.format(
o=args.out.replace("feats", "durations")))
if args.save_focus_rates:
fr_writer = kaldiio.WriteHelper(
'ark,scp:{o}.ark,{o}.scp'.format(
o=args.out.replace("feats", "focus_rates")))
# start decoding
for idx, utt_id in enumerate(js.keys()):
# setup inputs
batch = [(utt_id, js[utt_id])]
data = load_inputs_and_targets(batch)
x = torch.LongTensor(data[0][0]).to(device)
spemb = None
if train_args.use_speaker_embedding:
spemb = torch.FloatTensor(data[1][0]).to(device)
# decode and write
start_time = time.time()
outs, probs, att_ws = model.inference(x, args, spemb=spemb)
logging.info("inference speed = %.1f frames / sec." % (
int(outs.size(0)) / (time.time() - start_time)))
if outs.size(0) == x.size(0) * args.maxlenratio:
logging.warning("output length reaches maximum length (%s)." % utt_id)
        focus_rate = _calculate_focus_rate(att_ws)
logging.info('(%d/%d) %s (size: %d->%d, focus rate: %.3f)' % (
idx + 1, len(js.keys()), utt_id, x.size(0), outs.size(0), focus_rate))
feat_writer[utt_id] = outs.cpu().numpy()
if args.save_durations:
ds = _convert_att_to_duration(att_ws)
dur_writer[utt_id] = ds.cpu().numpy()
if args.save_focus_rates:
fr_writer[utt_id] = np.array(focus_rate).reshape(1, 1)
# plot and save prob and att_ws
if probs is not None:
_plot_and_save(probs.cpu().numpy(), os.path.dirname(args.out) + "/probs/%s_prob.png" % utt_id)
if att_ws is not None:
_plot_and_save(att_ws.cpu().numpy(), os.path.dirname(args.out) + "/att_ws/%s_att_ws.png" % utt_id)
# close file object
feat_writer.close()
if args.save_durations:
dur_writer.close()
if args.save_focus_rates:
fr_writer.close()
| [
"torch.cat",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.FloatTensor",
"torch.device",
"torch.no_grad",
"torch.from_numpy"
] | 1.0.1 | kokeshing/espnet | 9e2bfc5cdecbb8846f5c6cb26d22010b06e98c40 |
1.10 | import random
from random import shuffle
import PIL
import torch
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
from torchvision.transforms import ToTensor
from tqdm import tqdm
from hw_asr.base import BaseTrainer
from hw_asr.logger.utils import plot_spectrogram_to_buf
from hw_asr.metric.utils import calc_cer, calc_wer
from hw_asr.utils import inf_loop, MetricTracker
class Trainer(BaseTrainer):
"""
Trainer class
"""
def __init__(
self,
model,
criterion,
metrics,
optimizer,
config,
device,
data_loader,
text_encoder,
valid_data_loader=None,
lr_scheduler=None,
len_epoch=None,
skip_oom=True,
):
super().__init__(model, criterion, metrics, optimizer, config, device)
self.skip_oom = skip_oom
self.text_encoder = text_encoder
self.config = config
self.data_loader = data_loader
if len_epoch is None:
# epoch-based training
self.len_epoch = len(self.data_loader)
else:
# iteration-based training
###OBO
#self.data_loader = inf_loop(data_loader)
###OBO
self.len_epoch = len_epoch
self.valid_data_loader = valid_data_loader
###OBO
self.do_validation = False #self.valid_data_loader is not None
###OBO
self.lr_scheduler = lr_scheduler
self.log_step = 10
self.train_metrics = MetricTracker(
"loss", "grad norm", *[m.name for m in self.metrics], writer=self.writer
)
self.valid_metrics = MetricTracker(
"loss", *[m.name for m in self.metrics], writer=self.writer
)
@staticmethod
def move_batch_to_device(batch, device: torch.device):
"""
        Move all necessary tensors to the device
"""
for tensor_for_gpu in ["spectrogram", "text_encoded"]:
batch[tensor_for_gpu] = batch[tensor_for_gpu].to(device)
return batch
def _clip_grad_norm(self):
if self.config["trainer"].get("grad_norm_clip", None) is not None:
clip_grad_norm_(
self.model.parameters(), self.config["trainer"]["grad_norm_clip"]
)
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains average loss and metric in this epoch.
"""
self.model.train()
self.train_metrics.reset()
self.writer.add_scalar("epoch", epoch)
for batch_idx, batch in enumerate(
tqdm(self.data_loader, desc="train", total=self.len_epoch)
):
###OBO
if batch_idx >= self.len_epoch:
break
###OBO
try:
batch = self.process_batch(
batch,
is_train=True,
metrics=self.train_metrics,
)
except RuntimeError as e:
if "out of memory" in str(e) and self.skip_oom:
self.logger.warning("OOM on batch. Skipping batch.")
for p in self.model.parameters():
if p.grad is not None:
del p.grad # free some memory
torch.cuda.empty_cache()
continue
else:
raise e
self.train_metrics.update("grad norm", self.get_grad_norm())
if batch_idx % self.log_step == 0:
self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
self.logger.debug(
"Train Epoch: {} {} Loss: {:.6f}".format(
epoch, self._progress(batch_idx), batch["loss"].item()
)
)
self.writer.add_scalar(
"learning rate", self.lr_scheduler.get_last_lr()[0]
)
self._log_predictions(part="train", **batch)
self._log_spectrogram(batch["spectrogram"])
self._log_scalars(self.train_metrics)
log = self.train_metrics.result()
if self.do_validation:
val_log = self._valid_epoch(epoch)
log.update(**{"val_" + k: v for k, v in val_log.items()})
return log
def process_batch(self, batch, is_train: bool, metrics: MetricTracker):
batch = self.move_batch_to_device(batch, self.device)
if is_train:
self.optimizer.zero_grad()
outputs = self.model(**batch)
if type(outputs) is dict:
batch.update(outputs)
else:
batch["logits"] = outputs
batch["log_probs"] = F.log_softmax(batch["logits"], dim=-1)
batch["log_probs_length"] = self.model.transform_input_lengths(
batch["spectrogram_length"]
)
batch["loss"] = self.criterion(**batch)
if is_train:
batch["loss"].backward()
self._clip_grad_norm()
self.optimizer.step()
if self.lr_scheduler is not None:
self.lr_scheduler.step()
metrics.update("loss", batch["loss"].item())
for met in self.metrics:
metrics.update(met.name, met(**batch))
return batch
def _valid_epoch(self, epoch):
"""
Validate after training an epoch
:param epoch: Integer, current training epoch.
:return: A log that contains information about validation
"""
self.model.eval()
self.valid_metrics.reset()
with torch.no_grad():
for batch_idx, batch in tqdm(
enumerate(self.valid_data_loader),
desc="validation",
total=len(self.valid_data_loader),
):
batch = self.process_batch(
batch,
is_train=False,
metrics=self.valid_metrics,
)
self.writer.set_step(epoch * self.len_epoch, "valid")
self._log_scalars(self.valid_metrics)
self._log_predictions(part="val", **batch)
self._log_spectrogram(batch["spectrogram"])
# add histogram of model parameters to the tensorboard
for name, p in self.model.named_parameters():
self.writer.add_histogram(name, p, bins="auto")
return self.valid_metrics.result()
def _progress(self, batch_idx):
base = "[{}/{} ({:.0f}%)]"
if hasattr(self.data_loader, "n_samples"):
current = batch_idx * self.data_loader.batch_size
total = self.data_loader.n_samples
else:
current = batch_idx
total = self.len_epoch
return base.format(current, total, 100.0 * current / total)
def _log_predictions(
self,
text,
log_probs,
log_probs_length,
examples_to_log=20,
*args,
**kwargs,
):
# TODO: implement logging of beam search results
if self.writer is None:
return
argmax_inds = log_probs.cpu().argmax(-1)
argmax_inds = [
inds[: int(ind_len)]
for inds, ind_len in zip(argmax_inds, log_probs_length)
]
argmax_texts_raw = [self.text_encoder.decode(inds) for inds in argmax_inds]
argmax_texts = [self.text_encoder.ctc_decode(inds) for inds in argmax_inds]
tuples = list(zip(argmax_texts, text, argmax_texts_raw))
shuffle(tuples)
to_log_pred = []
to_log_pred_raw = []
for pred, target, raw_pred in tuples[:examples_to_log]:
wer = calc_wer(target, pred) * 100
cer = calc_cer(target, pred) * 100
to_log_pred.append(
f"true: '{target}' | pred: '{pred}' "
f"| wer: {wer:.2f} | cer: {cer:.2f}"
)
to_log_pred_raw.append(f"true: '{target}' | pred: '{raw_pred}'\n")
self.writer.add_text(f"predictions", "< < < < > > > >".join(to_log_pred))
self.writer.add_text(
f"predictions_raw", "< < < < > > > >".join(to_log_pred_raw)
)
def _log_spectrogram(self, spectrogram_batch):
spectrogram = random.choice(spectrogram_batch)
image = PIL.Image.open(plot_spectrogram_to_buf(spectrogram.cpu().log()))
self.writer.add_image("spectrogram", ToTensor()(image))
@torch.no_grad()
def get_grad_norm(self, norm_type=2):
parameters = self.model.parameters()
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
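        # total norm = (sum_p ||grad_p||_2^2) ** 0.5 for norm_type=2, i.e. the norm
        # of all per-parameter gradients concatenated into a single vector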
total_norm = torch.norm(
torch.stack(
[torch.norm(p.grad.detach(), norm_type).cpu() for p in parameters]
),
norm_type,
)
return total_norm.item()
def _log_scalars(self, metric_tracker: MetricTracker):
if self.writer is None:
return
for metric_name in metric_tracker.keys():
self.writer.add_scalar(f"{metric_name}", metric_tracker.avg(metric_name))
| [
"torch.no_grad",
"torch.cuda.empty_cache",
"torch.nn.functional.log_softmax"
] | 1.10.1 | Mrrrat/asr_project_template | 50d264684d90bc45c59f3e9be5766fabaf090d25 |
1.7 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-05-08 20:51
import functools
import os
from typing import Union, Any, List
import torch
from alnlp.modules.util import lengths_to_mask
from torch import nn
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
from elit.common.constant import UNK, IDX
from elit.common.dataset import PadSequenceDataLoader
from elit.common.structure import History
from elit.common.torch_component import TorchComponent
from elit.common.transform import LowerCase, FieldLength, PunctuationMask, TransformList
from elit.common.vocab import Vocab, VocabCounter
from elit.common.conll import CoNLLWord, CoNLLSentence
from elit.components.parsers.constituency.treecrf import CRF2oDependency
from elit.components.parsers.second_order.model import DependencyModel
from elit.components.parsers.second_order.treecrf_decoder import TreeCRFDecoder
from elit.datasets.parsing.conll_dataset import CoNLLParsingDataset, append_bos, get_sibs
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding, ContextualWordEmbeddingModule
from elit.layers.embeddings.embedding import Embedding, EmbeddingList, ConcatModuleList
from elit.layers.embeddings.util import index_word2vec_with_vocab
from elit.layers.transformers.pt_imports import AutoModel_
from elit.layers.transformers.utils import build_optimizer_scheduler_with_transformer
from elit.metrics.parsing.attachmentscore import AttachmentScore
from elit.transform.transformer_tokenizer import TransformerSequenceTokenizer
from elit.utils.time_util import CountdownTimer
from elit.common.util import merge_locals_kwargs, merge_dict, reorder
class TreeConditionalRandomFieldDependencyParser(TorchComponent):
def __init__(self) -> None:
super().__init__()
self.model: DependencyModel = self.model
self._transformer_transform = None
def predict(self, data: Any, batch_size=None, batch_max_tokens=None, output_format='conllx', **kwargs):
if not data:
return []
use_pos = self.use_pos
flat = self.input_is_flat(data, use_pos)
if flat:
data = [data]
samples = self.build_samples(data, use_pos)
if not batch_max_tokens:
batch_max_tokens = self.config.batch_max_tokens
if not batch_size:
batch_size = self.config.batch_size
dataloader = self.build_dataloader(samples,
device=self.devices[0], shuffle=False,
**merge_dict(self.config,
batch_size=batch_size,
batch_max_tokens=batch_max_tokens,
overwrite=True,
**kwargs))
predictions, build_data, data, order = self.before_outputs(data)
for batch in dataloader:
arc_scores, rel_scores, mask, puncts = self.feed_batch(batch)
self.collect_outputs(arc_scores, rel_scores, mask, batch, predictions, order, data, use_pos,
build_data)
outputs = self.post_outputs(predictions, data, order, use_pos, build_data)
if flat:
return outputs[0]
return outputs
def build_samples(self, data, use_pos=None):
samples = []
for idx, each in enumerate(data):
sample = {IDX: idx}
if use_pos:
token, pos = zip(*each)
sample.update({'FORM': list(token), 'CPOS': list(pos)})
else:
token = each
sample.update({'FORM': list(token)})
samples.append(sample)
return samples
def input_is_flat(self, data, use_pos=None):
if use_pos:
flat = isinstance(data[0], (list, tuple)) and isinstance(data[0][0], str)
else:
flat = isinstance(data[0], str)
return flat
def before_outputs(self, data):
predictions, order = [], []
build_data = data is None
if build_data:
data = []
return predictions, build_data, data, order
def post_outputs(self, predictions, data, order, use_pos, build_data):
predictions = reorder(predictions, order)
if build_data:
data = reorder(data, order)
outputs = []
self.predictions_to_human(predictions, outputs, data, use_pos)
return outputs
def predictions_to_human(self, predictions, outputs, data, use_pos):
for d, (arcs, rels) in zip(data, predictions):
sent = CoNLLSentence()
for idx, (cell, a, r) in enumerate(zip(d, arcs, rels)):
if use_pos:
token, pos = cell
else:
token, pos = cell, None
sent.append(CoNLLWord(idx + 1, token, cpos=pos, head=a, deprel=self.vocabs['rel'][r]))
outputs.append(sent)
def collect_outputs(self, arc_scores, rel_scores, mask, batch, predictions, order, data, use_pos,
build_data):
lens = [len(token) - 1 for token in batch['token']]
arc_preds, rel_preds = self.decode(arc_scores, rel_scores, mask, batch)
self.collect_outputs_extend(predictions, arc_preds, rel_preds, lens, mask)
order.extend(batch[IDX])
if build_data:
if use_pos:
data.extend(zip(batch['FORM'], batch['CPOS']))
else:
data.extend(batch['FORM'])
def collect_outputs_extend(self, predictions: list, arc_preds, rel_preds, lens, mask):
predictions.extend(zip([seq.tolist() for seq in arc_preds[mask].split(lens)],
[seq.tolist() for seq in rel_preds[mask].split(lens)]))
def fit(self,
trn_data,
dev_data,
save_dir,
embed,
n_mlp_arc=500,
n_mlp_rel=100,
n_mlp_sib=100,
mlp_dropout=.33,
lr=2e-3,
transformer_lr=5e-5,
mu=.9,
nu=.9,
epsilon=1e-12,
grad_norm=5.0,
decay=.75,
decay_steps=5000,
weight_decay=0,
warmup_steps=0.1,
separate_optimizer=True,
patience=100,
lowercase=False,
epochs=50000,
tree=False,
proj=True,
mbr=True,
partial=False,
punct=False,
min_freq=2,
logger=None,
verbose=True,
unk=UNK,
max_sequence_length=512,
batch_size=None,
sampler_builder=None,
gradient_accumulation=1,
devices: Union[float, int, List[int]] = None,
transform=None,
eval_trn=False,
bos='\0',
**kwargs):
return super().fit(**merge_locals_kwargs(locals(), kwargs))
def execute_training_loop(self, trn, dev, devices, epochs, logger, patience, save_dir, optimizer,
gradient_accumulation, **kwargs):
optimizer, scheduler, transformer_optimizer, transformer_scheduler = optimizer
criterion = self.build_criterion()
best_e, best_metric = 0, self.build_metric()
timer = CountdownTimer(epochs)
history = History()
ratio_width = len(f'{len(trn) // gradient_accumulation}/{len(trn) // gradient_accumulation}')
for epoch in range(1, epochs + 1):
# train one epoch and update the parameters
logger.info(f"[yellow]Epoch {epoch} / {epochs}:[/yellow]")
self.fit_dataloader(trn, optimizer, scheduler, criterion, epoch, logger, history,
transformer_optimizer, transformer_scheduler,
gradient_accumulation=gradient_accumulation, eval_trn=self.config.eval_trn)
loss, dev_metric = self.evaluate_dataloader(dev, criterion, ratio_width=ratio_width, logger=logger)
timer.update()
# logger.info(f"{'Dev' + ' ' * ratio_width} loss: {loss:.4f} {dev_metric}")
# save the model if it is the best so far
report = f"{timer.elapsed_human} / {timer.total_time_human} ETA: {timer.eta_human}"
if dev_metric > best_metric:
best_e, best_metric = epoch, dev_metric
self.save_weights(save_dir)
report += ' ([red]saved[/red])'
else:
if patience != epochs:
report += f' ({epoch - best_e}/{patience})'
else:
report += f' ({epoch - best_e})'
logger.info(report)
if patience is not None and epoch - best_e >= patience:
logger.info(f'LAS has stopped improving for {patience} epochs, early stop.')
break
timer.stop()
if not best_e:
self.save_weights(save_dir)
elif best_e != epoch:
self.load_weights(save_dir)
logger.info(f"Max score of dev is {best_metric.score:.2%} at epoch {best_e}")
logger.info(f"Average time of each epoch is {timer.elapsed_average_human}")
logger.info(f"{timer.elapsed_human} elapsed")
def build_optimizer(self, epochs, trn, gradient_accumulation, **kwargs):
config = self.config
model = self.model
if isinstance(model, nn.DataParallel):
model = model.module
transformer = self._get_transformer_builder()
if transformer and transformer.trainable:
transformer = self._get_transformer()
optimizer = Adam(set(model.parameters()) - set(transformer.parameters()),
config.lr,
(config.mu, config.nu),
config.epsilon)
if self.config.transformer_lr:
num_training_steps = len(trn) * epochs // gradient_accumulation
if not self.config.separate_optimizer:
optimizer, scheduler = build_optimizer_scheduler_with_transformer(model,
transformer,
config.lr,
config.transformer_lr,
num_training_steps,
config.warmup_steps,
config.weight_decay,
config.epsilon)
transformer_optimizer, transformer_scheduler = None, None
else:
transformer_optimizer, transformer_scheduler = \
build_optimizer_scheduler_with_transformer(transformer,
transformer,
config.lr,
config.transformer_lr,
num_training_steps,
config.warmup_steps,
config.weight_decay,
config.epsilon)
else:
transformer.requires_grad_(False)
transformer_optimizer, transformer_scheduler = None, None
else:
optimizer = Adam(model.parameters(),
config.lr,
(config.mu, config.nu),
config.epsilon)
transformer_optimizer, transformer_scheduler = None, None
        if self.config.separate_optimizer:
            # rebuild the optimizer so the exponential-decay scheduler is attached to it;
            # hyperparameters come from the config (lr, betas=(mu, nu), eps and decay rate)
            # noinspection PyUnboundLocalVariable
            optimizer = Adam(model.parameters(), config.lr, (config.mu, config.nu), config.epsilon)
            scheduler = ExponentialLR(optimizer, gamma=config.decay ** (1 / config.decay_steps))
return optimizer, scheduler, transformer_optimizer, transformer_scheduler
# noinspection PyMethodOverriding
def build_dataloader(self,
data,
shuffle,
device,
embed: Embedding,
training=False,
logger=None,
gradient_accumulation=1,
sampler_builder=None,
batch_size=None,
bos='\0',
**kwargs) -> DataLoader:
first_transform = TransformList(functools.partial(append_bos, bos=bos))
embed_transform = embed.transform(vocabs=self.vocabs)
transformer_transform = self._get_transformer_transform_from_transforms(embed_transform)
if embed_transform:
if transformer_transform and isinstance(embed_transform, TransformList):
embed_transform.remove(transformer_transform)
first_transform.append(embed_transform)
dataset = self.build_dataset(data, first_transform=first_transform)
if self.config.get('transform', None):
dataset.append_transform(self.config.transform)
if self.vocabs.mutable:
self.build_vocabs(dataset, logger, self._transformer_trainable())
if transformer_transform and isinstance(embed_transform, TransformList):
embed_transform.append(transformer_transform)
dataset.append_transform(FieldLength('token', 'sent_length'))
if isinstance(data, str):
dataset.purge_cache()
if len(dataset) > 1000 and isinstance(data, str):
timer = CountdownTimer(len(dataset))
self.cache_dataset(dataset, timer, training, logger)
if sampler_builder:
lens = [sample['sent_length'] for sample in dataset]
sampler = sampler_builder.build(lens, shuffle, gradient_accumulation)
else:
sampler = None
loader = PadSequenceDataLoader(dataset=dataset,
batch_sampler=sampler,
batch_size=batch_size,
pad=self.get_pad_dict(),
device=device,
vocabs=self.vocabs)
return loader
def cache_dataset(self, dataset, timer, training=False, logger=None):
for each in dataset:
timer.log('Preprocessing and caching samples [blink][yellow]...[/yellow][/blink]')
def get_pad_dict(self):
return {'arc': 0}
def build_dataset(self, data, first_transform=None):
if not first_transform:
first_transform = append_bos
transform = [first_transform, get_sibs]
if self.config.get('lowercase', False):
transform.append(LowerCase('token'))
transform.append(self.vocabs)
if not self.config.punct:
transform.append(PunctuationMask('token', 'punct_mask'))
return CoNLLParsingDataset(data, transform=transform)
def build_tokenizer_transform(self):
return TransformerSequenceTokenizer(self.transformer_tokenizer, 'token', '',
ret_token_span=True, cls_is_bos=True,
max_seq_length=self.config.get('max_sequence_length',
512),
truncate_long_sequences=False)
def build_vocabs(self, dataset, logger=None, transformer=False):
rel_vocab = self.vocabs.get('rel', None)
if rel_vocab is None:
rel_vocab = Vocab(unk_token=None, pad_token=self.config.get('pad_rel', None))
self.vocabs.put(rel=rel_vocab)
timer = CountdownTimer(len(dataset))
if transformer:
token_vocab = None
else:
self.vocabs.token = token_vocab = VocabCounter(unk_token=self.config.get('unk', UNK))
for i, sample in enumerate(dataset):
timer.log('Building vocab [blink][yellow]...[/yellow][/blink]', ratio_percentage=True)
min_freq = self.config.get('min_freq', None)
if min_freq:
token_vocab.trim(min_freq)
        rel_vocab.set_unk_as_safe_unk()  # some relations in the dev set may be OOV
self.vocabs.lock()
self.vocabs.summary(logger=logger)
if token_vocab:
self.config.n_words = len(self.vocabs['token'])
self.config.n_rels = len(self.vocabs['rel'])
if token_vocab:
self.config.pad_index = self.vocabs['token'].pad_idx
self.config.unk_index = self.vocabs['token'].unk_idx
# noinspection PyMethodOverriding
def build_model(self, embed: Embedding, encoder, n_mlp_arc, n_mlp_rel, mlp_dropout, n_mlp_sib, training=True,
**kwargs) -> torch.nn.Module:
model = DependencyModel(
embed=embed.module(vocabs=self.vocabs),
encoder=encoder,
decoder=TreeCRFDecoder(encoder.get_output_dim(), n_mlp_arc, n_mlp_sib, n_mlp_rel, mlp_dropout,
len(self.vocabs['rel']))
)
return model
def build_embeddings(self, training=True):
pretrained_embed = None
if self.config.get('pretrained_embed', None):
pretrained_embed = index_word2vec_with_vocab(self.config.pretrained_embed, self.vocabs['token'],
init='zeros', normalize=True)
transformer = self.config.transformer
if transformer:
transformer = AutoModel_.from_pretrained(transformer, training=training)
return pretrained_embed, transformer
# noinspection PyMethodOverriding
def fit_dataloader(self,
trn,
optimizer,
scheduler,
criterion,
epoch,
logger,
history: History,
transformer_optimizer=None,
transformer_scheduler=None,
gradient_accumulation=1,
eval_trn=False,
**kwargs):
self.model.train()
timer = CountdownTimer(history.num_training_steps(len(trn), gradient_accumulation))
metric = self.build_metric(training=True)
total_loss = 0
for idx, batch in enumerate(trn):
optimizer.zero_grad()
(s_arc, s_sib, s_rel), mask, puncts = self.feed_batch(batch)
arcs, sibs, rels = batch['arc'], batch['sib_id'], batch['rel_id']
loss, s_arc = self.compute_loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask)
if gradient_accumulation > 1:
loss /= gradient_accumulation
loss.backward()
total_loss += loss.item()
if eval_trn:
arc_preds, rel_preds = self.decode(s_arc, s_sib, s_rel, mask)
self.update_metric(arc_preds, rel_preds, arcs, rels, mask, puncts, metric)
if history.step(gradient_accumulation):
self._step(optimizer, scheduler, transformer_optimizer, transformer_scheduler)
report = self._report(total_loss / (timer.current + 1), metric if eval_trn else None)
lr = scheduler.get_last_lr()[0]
report += f' lr: {lr:.4e}'
timer.log(report, ratio_percentage=False, logger=logger)
del loss
def _step(self, optimizer, scheduler, transformer_optimizer, transformer_scheduler):
if self.config.get('grad_norm', None):
nn.utils.clip_grad_norm_(self.model.parameters(),
self.config.grad_norm)
optimizer.step()
scheduler.step()
if self._transformer_transform and self.config.transformer_lr and transformer_optimizer:
transformer_optimizer.step()
transformer_optimizer.zero_grad()
transformer_scheduler.step()
def feed_batch(self, batch):
words, feats, lens, puncts = batch.get('token_id', None), batch.get('pos_id', None), batch['sent_length'], \
batch.get('punct_mask', None)
mask = lengths_to_mask(lens)
logits = self.model(batch, mask)
if self.model.training:
mask = mask.clone()
# ignore the first token of each sentence
mask[:, 0] = 0
return logits, mask, puncts
def _report(self, loss, metric: AttachmentScore = None):
return f'loss: {loss:.4f} {metric}' if metric else f'loss: {loss:.4f}'
def compute_loss(self, s_arc, s_sib, s_rel, arcs, sibs, rels, mask):
crf: CRF2oDependency = self.model.decoder.crf
return crf.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.config.mbr, self.config.partial)
# noinspection PyUnboundLocalVariable
@torch.no_grad()
def evaluate_dataloader(self, loader: PadSequenceDataLoader, criterion, logger=None, filename=None, output=False,
ratio_width=None,
metric=None,
**kwargs):
self.model.eval()
total_loss = 0
if not metric:
metric = self.build_metric()
timer = CountdownTimer(len(loader))
for batch in loader:
(s_arc, s_sib, s_rel), mask, puncts = self.feed_batch(batch)
arcs, sibs, rels = batch['arc'], batch['sib_id'], batch['rel_id']
loss, s_arc = self.compute_loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask)
total_loss += float(loss)
arc_preds, rel_preds = self.decode(s_arc, s_sib, s_rel, mask)
self.update_metric(arc_preds, rel_preds, arcs, rels, mask, puncts, metric)
report = self._report(total_loss / (timer.current + 1), metric)
if filename:
report = f'{os.path.basename(filename)} ' + report
timer.log(report, ratio_percentage=False, logger=logger, ratio_width=ratio_width)
total_loss /= len(loader)
return total_loss, metric
def update_metric(self, arc_preds, rel_preds, arcs, rels, mask, puncts, metric):
        # mask out punctuation unless punctuation evaluation is explicitly enabled
if not self.config.punct:
mask &= puncts
metric(arc_preds, rel_preds, arcs, rels, mask)
def decode(self, s_arc, s_sib, s_rel, mask):
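        # well-formed trees are only enforced at inference time
        # (`self.config.tree and not self.model.training`); MBR decoding and the
        # projectivity constraint follow the config flags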
crf: CRF2oDependency = self.model.decoder.crf
return crf.decode(s_arc, s_sib, s_rel, mask, self.config.tree and not self.model.training, self.config.mbr,
self.config.proj)
def build_criterion(self, **kwargs):
return None
def build_metric(self, **kwargs):
return AttachmentScore()
def _get_transformer_transform_from_transforms(self, transform: Union[
TransformList, TransformerSequenceTokenizer]) -> TransformerSequenceTokenizer:
def _get():
if isinstance(transform, TransformerSequenceTokenizer):
# noinspection PyTypeChecker
return transform
elif isinstance(transform, TransformList):
# noinspection PyTypeChecker,PyArgumentList
for each in transform:
if isinstance(each, TransformerSequenceTokenizer):
return each
if self._transformer_transform is None:
self._transformer_transform = _get()
return self._transformer_transform
def _get_transformer(self):
embed = self.model.embed
if isinstance(embed, ContextualWordEmbeddingModule):
return embed
if isinstance(embed, ConcatModuleList):
for each in embed:
if isinstance(each, ContextualWordEmbeddingModule):
return each
def _get_transformer_builder(self):
embed: Embedding = self.config.embed
if isinstance(embed, ContextualWordEmbedding):
return embed
if isinstance(embed, EmbeddingList):
for each in embed.to_list():
                if isinstance(each, ContextualWordEmbedding):
return each
def _transformer_trainable(self):
builder = self._get_transformer_builder()
if not builder:
return False
return builder.trainable
| [
"torch.no_grad",
"torch.optim.lr_scheduler.ExponentialLR"
] | 1.7.1 | emorynlp/levi-graph-amr-parser | f71f1056c13181b8db31d6136451fb8d57114819 |
1.8 | import sys
sys.path.append("..")
import os
import math
import torch
import torchvision
import model.E.Ablation_Study.E_Blur_Z as BE
from model.utils.custom_adam import LREQAdam
import metric.pytorch_ssim as pytorch_ssim
import lpips
import numpy as np
import tensorboardX
import argparse
from model.stylegan1.net import Generator, Mapping #StyleGANv1
from training_utils import *
def train(tensor_writer = None, args = None):
type = args.mtype
model_path = args.checkpoint_dir_GAN
if type == 1: # StyleGAN1
#model_path = './checkpoint/stylegan_v1/ffhq1024/'
Gs = Generator(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3)
Gs.load_state_dict(torch.load(model_path+'Gs_dict.pth'))
Gm = Mapping(num_layers=int(math.log(args.img_size,2)-1)*2, mapping_layers=8, latent_size=512, dlatent_size=512, mapping_fmaps=512) #num_layers: 14->256 / 16->512 / 18->1024
Gm.load_state_dict(torch.load(model_path+'Gm_dict.pth'))
Gm.buffer1 = torch.load(model_path+'./center_tensor.pt')
const_ = Gs.const
const1 = const_.repeat(args.batch_size,1,1,1).detach().clone().cuda()
layer_num = int(math.log(args.img_size,2)-1)*2 # 14->256 / 16 -> 512 / 18->1024
        layer_idx = torch.arange(layer_num)[np.newaxis, :, np.newaxis] # shape: [1, layer_num, 1]; layer_idx = [0, 1, 2, ..., layer_num-1]
        ones = torch.ones(layer_idx.shape, dtype=torch.float32) # shape: [1, layer_num, 1]; all ones
        coefs = torch.where(layer_idx < layer_num//2, 0.7 * ones, ones) # truncation: scale the first half of the style layers by psi=0.7 -> [0.7, ..., 0.7, 1, ..., 1]
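        # e.g. for a 1024px model layer_num = 18, so the first 9 (coarse) style layers
        # are truncated with psi=0.7 while the remaining 9 are left untouched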
coefs = coefs.cuda()
Gs.cuda()
Gm.cuda()
E = BE.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3)
else:
print('error')
return
if args.checkpoint_dir_E != None:
E.load_state_dict(torch.load(args.checkpoint_dir_E))
E.cuda()
writer = tensor_writer
E_optimizer = LREQAdam([{'params': E.parameters()},], lr=args.lr, betas=(args.beta_1, 0.99), weight_decay=0)
loss_lpips = lpips.LPIPS(net='vgg').to('cuda')
batch_size = args.batch_size
it_d = 0
for iteration in range(0,args.iterations):
set_seed(iteration%30000)
z_c1 = torch.randn(batch_size, args.z_dim).cuda() #[n, 512]
if type == 1:
w1 = Gm(z_c1,coefs_m=coefs) #[batch_size,18,512]
imgs1 = Gs.forward(w1,int(math.log(args.img_size,2)-2)) # 7->512 / 6->256
z_c2, _ = E(imgs1)
z_c2 = z_c2.squeeze(-1).squeeze(-1)
w2 = Gm(z_c2,coefs_m=coefs)
imgs2 = Gs.forward(w2,int(math.log(args.img_size,2)-2))
else:
print('model type error')
return
E_optimizer.zero_grad()
#loss Images
loss_imgs, loss_imgs_info = space_loss(imgs1,imgs2,lpips_model=loss_lpips)
loss_msiv = loss_imgs
E_optimizer.zero_grad()
loss_msiv.backward(retain_graph=True)
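        # retain_graph=True keeps the shared computation graph alive for the second,
        # latent-space backward pass below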
E_optimizer.step()
#Latent-Vectors
## w
#loss_w, loss_w_info = space_loss(w1,w2,image_space = False)
## c
loss_c, loss_c_info = space_loss(z_c1,z_c2,image_space = False)
loss_mslv = loss_c*0.01
E_optimizer.zero_grad()
loss_mslv.backward()
E_optimizer.step()
print('ep_%d_iter_%d'%(iteration//30000,iteration%30000))
print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]')
print('---------ImageSpace--------')
print('loss_imgs_info: %s'%loss_imgs_info)
print('---------LatentSpace--------')
print('loss_c_info: %s'%loss_c_info)
it_d += 1
writer.add_scalar('loss_imgs_mse', loss_imgs_info[0][0], global_step=it_d)
writer.add_scalar('loss_imgs_mse_mean', loss_imgs_info[0][1], global_step=it_d)
writer.add_scalar('loss_imgs_mse_std', loss_imgs_info[0][2], global_step=it_d)
writer.add_scalar('loss_imgs_kl', loss_imgs_info[1], global_step=it_d)
writer.add_scalar('loss_imgs_cosine', loss_imgs_info[2], global_step=it_d)
writer.add_scalar('loss_imgs_ssim', loss_imgs_info[3], global_step=it_d)
writer.add_scalar('loss_imgs_lpips', loss_imgs_info[4], global_step=it_d)
writer.add_scalar('loss_c_mse', loss_c_info[0][0], global_step=it_d)
writer.add_scalar('loss_c_mse_mean', loss_c_info[0][1], global_step=it_d)
writer.add_scalar('loss_c_mse_std', loss_c_info[0][2], global_step=it_d)
writer.add_scalar('loss_c_kl', loss_c_info[1], global_step=it_d)
writer.add_scalar('loss_c_cosine', loss_c_info[2], global_step=it_d)
writer.add_scalar('loss_c_ssim', loss_c_info[3], global_step=it_d)
writer.add_scalar('loss_c_lpips', loss_c_info[4], global_step=it_d)
writer.add_scalars('Latent Space C', {'loss_c_mse':loss_c_info[0][0],'loss_c_mse_mean':loss_c_info[0][1],'loss_c_mse_std':loss_c_info[0][2],'loss_c_kl':loss_c_info[1],'loss_c_cosine':loss_c_info[2]}, global_step=it_d)
if iteration % 100 == 0:
n_row = batch_size
test_img = torch.cat((imgs1[:n_row],imgs2[:n_row]))*0.5+0.5
torchvision.utils.save_image(test_img, resultPath1_1+'/ep%d_iter%d.jpg'%(iteration//30000,iteration%30000),nrow=n_row) # nrow=3
with open(resultPath+'/Loss.txt', 'a+') as f:
print('i_'+str(iteration),file=f)
print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]',file=f)
print('---------ImageSpace--------',file=f)
print('loss_imgs_info: %s'%loss_imgs_info,file=f)
print('---------LatentSpace--------',file=f)
print('loss_c_info: %s'%loss_c_info,file=f)
if iteration % 5000 == 0:
torch.save(E.state_dict(), resultPath1_2+'/E_model_ep%d_iter%d.pth'%(iteration//30000,iteration%30000))
#torch.save(Gm.buffer1,resultPath1_2+'/center_tensor_iter%d.pt'%iteration)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='the training args')
parser.add_argument('--iterations', type=int, default=60001) # epoch = iterations//30000
parser.add_argument('--lr', type=float, default=0.0015)
parser.add_argument('--beta_1', type=float, default=0.0)
parser.add_argument('--batch_size', type=int, default=2)
parser.add_argument('--experiment_dir', default=None) #None
parser.add_argument('--checkpoint_dir_GAN', default='../checkpoint/stylegan_v1/ffhq1024/') #None ./checkpoint/stylegan_v1/ffhq1024/ or ./checkpoint/stylegan_v2/stylegan2_ffhq1024.pth or ./checkpoint/biggan/256/G-256.pt
parser.add_argument('--config_dir', default='./checkpoint/biggan/256/biggan-deep-256-config.json') # BigGAN needs it
parser.add_argument('--checkpoint_dir_E', default=None)
parser.add_argument('--img_size',type=int, default=1024)
parser.add_argument('--img_channels', type=int, default=3)# RGB:3 ,L:1
parser.add_argument('--z_dim', type=int, default=512) # PGGAN , StyleGANs are 512. BIGGAN is 128
parser.add_argument('--mtype', type=int, default=1) # StyleGANv1=1, StyleGANv2=2, PGGAN=3, BigGAN=4
parser.add_argument('--start_features', type=int, default=16) # 16->1024 32->512 64->256
args = parser.parse_args()
if not os.path.exists('./result'): os.mkdir('./result')
resultPath = args.experiment_dir
    if resultPath is None:
resultPath = "./result/StyleGANv1-AlationStudy-Z"
if not os.path.exists(resultPath): os.mkdir(resultPath)
resultPath1_1 = resultPath+"/imgs"
if not os.path.exists(resultPath1_1): os.mkdir(resultPath1_1)
resultPath1_2 = resultPath+"/models"
if not os.path.exists(resultPath1_2): os.mkdir(resultPath1_2)
    writer_path = os.path.join(resultPath, 'summaries')
if not os.path.exists(writer_path): os.mkdir(writer_path)
writer = tensorboardX.SummaryWriter(writer_path)
use_gpu = True
device = torch.device("cuda" if use_gpu else "cpu")
train(tensor_writer=writer, args = args)
| [
"torch.device",
"torch.cat",
"torch.arange",
"torch.ones",
"torch.load",
"torch.randn",
"torch.where"
] | 1.8 | disanda/MSV | 066ed236a4c5df8b4b5e366020fe2954b7a6915a |
0.4 | import os.path
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image, ImageOps
import torch
class Aligned3TmMaxDataset(BaseDataset):
"""A dataset class for paired image dataset.
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
During test time, you need to prepare a directory '/path/to/data/test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_ABC = os.path.join(opt.dataroot, opt.phase) # get the image directory
self.ABC_paths = sorted(make_dataset(self.dir_ABC, opt.max_dataset_size)) # get image paths
assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
self.input2_nc = self.opt.input2_nc
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - an image in the input domain
B (tensor) - - its corresponding image in the target domain
C (tensor) - - an alternative image in the input domain
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
C_paths (str) - - image paths (same as A_paths)
"""
        # read an image given a random integer index
ABC_path = self.ABC_paths[index]
ABC = Image.open(ABC_path).convert('RGB')
# split AB image into A and B
w, h = ABC.size
h25 = int(h / 25)
w3 = int(w / 3)
A = []
B = []
C = []
for i in range(25):
A.append(ABC.crop((0, h25*i, w3, h25*(i+1))))
B.append(ABC.crop((w3, h25*i, w3*2, h25*(i+1))))
Ctmp = ImageOps.flip(ABC.crop((w3*2, h25*i, w, h25*(i+1))))
Ctmp = Ctmp.convert("L")
_, vmax = Ctmp.getextrema()
Ctmp = Ctmp.point(lambda x: 0 if x < vmax else 255)
C.append(Ctmp)
        # apply the same transform to A, B and C
transform_params = get_params(self.opt, A[0].size)
A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
C_transform = get_transform(self.opt, transform_params, grayscale=(self.input2_nc == 1), convert=False)
for i in range(25):
A[i] = A_transform(A[i])
B[i] = B_transform(B[i])
C[i] = C_transform(C[i])
Acat = torch.unsqueeze(A[0], 0)
Bcat = torch.unsqueeze(B[0], 0)
Ccat = torch.unsqueeze(C[0], 0)
for i in range(1,25):
Acat = torch.cat([Acat, torch.unsqueeze(A[i], 0)], dim=0)
Bcat = torch.cat([Bcat, torch.unsqueeze(B[i], 0)], dim=0)
Ccat = torch.cat([Ccat, torch.unsqueeze(C[i], 0)], dim=0)
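        # Acat/Bcat/Ccat each stack the 25 horizontal strips along a new leading
        # dimension, i.e. shape [25, C, H', W'] where H'xW' is the per-strip size
        # after the transforms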
# print('Acat size:', Acat.size())
# print('A_trans:', A.max(), A.min())
# print('B_trans:', B.max(), B.min())
# print('C_trans:', C.max(), C.min())
return {'A': Acat, 'B': Bcat, 'C': Ccat, 'A_paths': ABC_path, 'B_paths': ABC_path, 'C_paths': ABC_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.ABC_paths)
| [
"torch.unsqueeze"
] | 0.4.1 | tkuri/pytorch-CycleGAN-and-pix2pix | b00b3f0bcebfb12d3f026c2a61c98ff63175a583 |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from time import sleep
from typing import Any, Callable, Dict, List, Optional, Union
import __main__
import numpy as np
import torch
import torch.distributed
from torch.distributed import GradBucket
from torch.nn import Module
from torch.nn.parallel.distributed import DistributedDataParallel
import pytorch_lightning as pl
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.overrides import LightningDistributedModule
from pytorch_lightning.overrides.distributed import prepare_for_backward
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.precision import PrecisionPlugin
from pytorch_lightning.strategies.parallel import ParallelStrategy
from pytorch_lightning.strategies.strategy import TBroadcast
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import (
_FAIRSCALE_AVAILABLE,
_HYDRA_AVAILABLE,
_IS_WINDOWS,
_TORCH_GREATER_EQUAL_1_8,
_TORCH_GREATER_EQUAL_1_9,
_TORCH_GREATER_EQUAL_1_10,
)
from pytorch_lightning.utilities.distributed import _revert_sync_batchnorm, distributed_available
from pytorch_lightning.utilities.distributed import group as _group
from pytorch_lightning.utilities.distributed import init_dist_connection, ReduceOp, sync_ddp_if_available
from pytorch_lightning.utilities.enums import _StrategyType
from pytorch_lightning.utilities.exceptions import DeadlockDetectedException
from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn
from pytorch_lightning.utilities.seed import reset_seed
from pytorch_lightning.utilities.types import STEP_OUTPUT
if _FAIRSCALE_AVAILABLE:
from fairscale.optim import OSS
if _HYDRA_AVAILABLE:
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd, to_absolute_path
if _TORCH_GREATER_EQUAL_1_8:
from pytorch_lightning.utilities.distributed import register_ddp_comm_hook
log = logging.getLogger(__name__)
class DDPStrategy(ParallelStrategy):
"""Plugin for multi-process single-device training on one or multiple nodes.
The main process in each node spawns N-1 child processes via :func:`subprocess.Popen`, where N is the number of
devices (e.g. GPU) per node. It is very similar to how :mod:`torch.distributed.launch` launches processes.
"""
distributed_backend = _StrategyType.DDP
def __init__(
self,
accelerator: Optional["pl.accelerators.accelerator.Accelerator"] = None,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
checkpoint_io: Optional[CheckpointIO] = None,
precision_plugin: Optional[PrecisionPlugin] = None,
ddp_comm_state: Optional[object] = None,
ddp_comm_hook: Optional[Callable[[Any, GradBucket], torch.futures.Future[torch.Tensor]]] = None,
ddp_comm_wrapper: Optional[
Callable[
[Callable[[Any, GradBucket], torch.futures.Future[torch.Tensor]], Any],
torch.futures.Future[torch.Tensor],
]
] = None,
model_averaging_period: Optional[int] = None,
**kwargs: Union[Any, Dict[str, Any]],
) -> None:
super().__init__(
accelerator=accelerator,
parallel_devices=parallel_devices,
cluster_environment=cluster_environment,
checkpoint_io=checkpoint_io,
precision_plugin=precision_plugin,
)
log.detail(f"{self.__class__.__name__}: initializing DDP plugin")
self.interactive_ddp_procs: List[subprocess.Popen] = []
self._num_nodes = 1
self.sync_batchnorm = False
self._ddp_kwargs = kwargs
self._ddp_comm_state = ddp_comm_state
self._ddp_comm_hook = ddp_comm_hook
self._ddp_comm_wrapper = ddp_comm_wrapper
self._model_averaging_period = model_averaging_period
self._pids: Optional[List[int]] = None
self._sync_dir: Optional[str] = None
self._rank_0_has_called_call_children_scripts: bool = False
self.set_world_ranks()
@property
def is_distributed(self) -> bool:
return True
@property
def root_device(self) -> torch.device:
return self.parallel_devices[self.local_rank]
@property
def num_nodes(self) -> int:
return self._num_nodes
@num_nodes.setter
def num_nodes(self, num_nodes: int) -> None:
# note that world ranks is related to num_nodes, when resetting it, need to reset world ranks
self._num_nodes = num_nodes
self.set_world_ranks()
@property
def num_processes(self) -> int:
return len(self.parallel_devices) if self.parallel_devices is not None else 0
@property
def distributed_sampler_kwargs(self) -> Dict[str, Any]:
distributed_sampler_kwargs = dict(num_replicas=(self.num_nodes * self.num_processes), rank=self.global_rank)
return distributed_sampler_kwargs
@property
def _is_single_process_single_device(self) -> bool:
return True
def setup_environment(self) -> None:
# start the other scripts
assert self.cluster_environment is not None
if not self.cluster_environment.creates_processes_externally:
self._call_children_scripts()
self.setup_distributed()
super().setup_environment()
def setup(self, trainer: "pl.Trainer") -> None:
super().setup(trainer)
# share ddp pids to all processes
self._rank_0_has_called_call_children_scripts = (
self.broadcast(self._rank_0_has_called_call_children_scripts) is True
)
if self._should_run_deadlock_detection():
self._share_information_to_prevent_deadlock()
# move the model to the correct device
self.model_to_device()
assert self.model is not None
if self.sync_batchnorm:
self.model = self.configure_sync_batchnorm(self.model)
# skip wrapping the model if we are not fitting as no gradients need to be exchanged
        assert self.lightning_module is not None
trainer_fn = self.lightning_module.trainer.state.fn
if trainer_fn == TrainerFn.FITTING:
self.configure_ddp()
def _setup_model(self, model: Module) -> DistributedDataParallel:
"""Wraps the model into a :class:`~torch.nn.parallel.distributed.DistributedDataParallel` module."""
device_ids = self.determine_ddp_device_ids()
log.detail(f"setting up DDP model with device ids: {device_ids}, kwargs: {self._ddp_kwargs}")
return DistributedDataParallel(module=model, device_ids=device_ids, **self._ddp_kwargs)
def _call_children_scripts(self) -> None:
# bookkeeping of spawned processes
self._check_can_spawn_children()
assert self.cluster_environment is not None
# DDP Environment variables
os.environ["MASTER_ADDR"] = self.cluster_environment.main_address
os.environ["MASTER_PORT"] = str(self.cluster_environment.main_port)
# allow the user to pass the node rank
os.environ["NODE_RANK"] = str(self.cluster_environment.node_rank())
os.environ["LOCAL_RANK"] = str(self.cluster_environment.local_rank())
# Check if the current calling command looked like `python a/b/c.py` or `python -m a.b.c`
# See https://docs.python.org/3/reference/import.html#main-spec
if __main__.__spec__ is None: # pragma: no-cover
# Script called as `python a/b/c.py`
# when user is using hydra find the absolute path
path_lib = os.path.abspath if not _HYDRA_AVAILABLE else to_absolute_path
# pull out the commands used to run the script and resolve the abs file path
command = sys.argv
try:
full_path = path_lib(command[0])
except Exception:
full_path = os.path.abspath(command[0])
command[0] = full_path
# use the same python interpreter and actually running
command = [sys.executable] + command
else: # Script called as `python -m a.b.c`
command = [sys.executable, "-m", __main__.__spec__.name] + sys.argv[1:]
os.environ["WORLD_SIZE"] = f"{self.num_processes * self.num_nodes}"
self.interactive_ddp_procs = []
for local_rank in range(1, self.num_processes):
env_copy = os.environ.copy()
env_copy["LOCAL_RANK"] = f"{local_rank}"
# remove env var if global seed not set
if os.environ.get("PL_GLOBAL_SEED") is None and "PL_GLOBAL_SEED" in env_copy:
del env_copy["PL_GLOBAL_SEED"]
# start process
# if hydra is available and initialized, make sure to set the cwd correctly
cwd: Optional[str] = None
if _HYDRA_AVAILABLE:
if HydraConfig.initialized():
cwd = get_original_cwd()
os_cwd = f'"{os.getcwd()}"'
command += [f"hydra.run.dir={os_cwd}", f"hydra.job.name=train_ddp_process_{local_rank}"]
proc = subprocess.Popen(command, env=env_copy, cwd=cwd)
self.interactive_ddp_procs.append(proc)
            # starting all processes at once can cause issues with dataloaders,
            # so stagger the launches with a random delay of 1-5 seconds
delay = np.random.uniform(1, 5, 1)[0]
sleep(delay)
self._rank_0_has_called_call_children_scripts = True
def setup_distributed(self) -> None:
log.detail(f"{self.__class__.__name__}: setting up distributed...")
reset_seed()
# determine which process we are and world size
self.set_world_ranks()
# set warning rank
rank_zero_only.rank = self.global_rank
# set up server using proc 0's ip address
# try to init for 20 times at max in case ports are taken
# where to store ip_table
assert self.cluster_environment is not None
init_dist_connection(self.cluster_environment, self.torch_distributed_backend)
def _check_can_spawn_children(self) -> None:
if self.local_rank != 0:
raise RuntimeError(
"Lightning attempted to launch new distributed processes with `local_rank > 0`. This should not happen."
" Possible reasons: 1) LOCAL_RANK environment variable was incorrectly modified by the user,"
" 2) `ClusterEnvironment.creates_processes_externally` incorrectly implemented."
)
def set_world_ranks(self) -> None:
if self.cluster_environment is None:
return
self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)
self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)
rank_zero_only.rank = self.cluster_environment.global_rank()
def pre_configure_ddp(self) -> None:
        # if unset, default `find_unused_parameters` to `True`
# Many models require setting this parameter to True, as there are corner cases
# when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True.
# This flag does come with a performance hit, so it is suggested to disable in cases where it is possible.
self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True)
assert self.lightning_module is not None
if not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get(
"find_unused_parameters", False
):
# TODO: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
rank_zero_warn(
"From PyTorch 1.7.0, Lightning `manual_optimization` needs to set `find_unused_parameters=True` to"
" properly work with DDP. Using `find_unused_parameters=True`."
)
self._ddp_kwargs["find_unused_parameters"] = True
def _register_ddp_hooks(self) -> None:
log.detail(f"{self.__class__.__name__}: registering ddp hooks")
# In 1.8, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode
# Since 1.9, DDP communication hooks can work on all backends.
if _TORCH_GREATER_EQUAL_1_9 or (
_TORCH_GREATER_EQUAL_1_8 and self.root_device.type == "cuda" and self._is_single_process_single_device
):
register_ddp_comm_hook(
model=self.model,
ddp_comm_state=self._ddp_comm_state,
ddp_comm_hook=self._ddp_comm_hook,
ddp_comm_wrapper=self._ddp_comm_wrapper,
)
if _TORCH_GREATER_EQUAL_1_10 and self.lightning_module.trainer.state.fn == TrainerFn.FITTING:
import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD
if isinstance(self._ddp_comm_state, post_localSGD.PostLocalSGDState):
self._reinit_optimizers_with_post_localSGD(self._ddp_comm_state.start_localSGD_iter)
def _reinit_optimizers_with_post_localSGD(self, warmup_steps: int) -> None:
log.detail(f"{self.__class__.__name__}: reinitializing optimizers with post localSGD")
optimizers = self.optimizers
if self._model_averaging_period is None:
raise ValueError(
"Post-localSGD algorithm is used, but model averaging period is not provided to DDP strategy."
)
if _TORCH_GREATER_EQUAL_1_10:
if not _IS_WINDOWS:
from torch.distributed.optim import DistributedOptimizer
import torch.distributed.algorithms.model_averaging.averagers as averagers
from torch.distributed.optim import PostLocalSGDOptimizer, ZeroRedundancyOptimizer
averager = averagers.PeriodicModelAverager(period=self._model_averaging_period, warmup_steps=warmup_steps)
for x, optimizer in enumerate(optimizers):
if isinstance(optimizer, LightningOptimizer):
optimizer = optimizer._optimizer
is_distributed_optimizer = isinstance(optimizer, DistributedOptimizer) if not _IS_WINDOWS else False
if (
is_distributed_optimizer
or isinstance(optimizer, ZeroRedundancyOptimizer)
or (_FAIRSCALE_AVAILABLE and isinstance(optimizer, OSS))
):
raise ValueError(
f"Cannot wrap a distributed optimizer of type {optimizer.__name__} by PostLocalSGDOptimizer."
)
if isinstance(optimizer, PostLocalSGDOptimizer):
continue
optim_class = type(optimizer)
post_localSGD_optimizer = PostLocalSGDOptimizer(
params=optimizer.param_groups,
optimizer_class=optim_class,
averager=averager,
**optimizer.defaults,
)
optimizers[x] = post_localSGD_optimizer
del optimizer
self.optimizers = optimizers
def configure_ddp(self) -> None:
log.detail(f"{self.__class__.__name__}: configuring DistributedDataParallel")
self.pre_configure_ddp()
assert self.model is not None
self.model = self._setup_model(LightningDistributedModule(self.model))
self._register_ddp_hooks()
def determine_ddp_device_ids(self) -> Optional[List[int]]:
if self.root_device.type == "cpu":
return None
return [self.root_device.index]
def barrier(self, *args, **kwargs) -> None:
if not distributed_available():
return
if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == "nccl":
torch.distributed.barrier(device_ids=self.determine_ddp_device_ids())
else:
torch.distributed.barrier()
def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
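        # `torch.distributed.broadcast_object_list` mutates a list in place, so wrap the
        # payload; non-source ranks pass a placeholder that rank `src` overwrites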
obj = [obj]
if self.global_rank != src:
obj = [None]
torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD)
return obj[0]
def pre_backward(self, closure_loss: torch.Tensor) -> None:
"""Run before precision plugin executes backward."""
assert self.model is not None
assert self.lightning_module is not None
if not self.lightning_module.automatic_optimization:
prepare_for_backward(self.model, closure_loss)
def model_to_device(self) -> None:
log.detail(f"{self.__class__.__name__}: moving model to device [{self.root_device}]...")
if self.model:
self.model.to(self.root_device)
def reduce(
self, tensor: torch.Tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str, None] = "mean"
) -> torch.Tensor:
"""Reduces a tensor from several distributed processes to one aggregated tensor.
Args:
tensor: the tensor to sync and reduce
group: the process group to gather results from. Defaults to all processes (world)
reduce_op: the reduction operation. Defaults to 'mean'/'avg'.
Can also be a string 'sum' to calculate the sum during reduction.
Return:
            reduced value, except when the input was not a tensor, in which case the output remains unchanged
"""
if isinstance(tensor, torch.Tensor):
tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op)
return tensor
def training_step(self, *args, **kwargs) -> STEP_OUTPUT:
with self.precision_plugin.train_step_context():
assert self.model is not None
return self.model(*args, **kwargs)
def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
with self.precision_plugin.val_step_context():
if isinstance(self.model, DistributedDataParallel):
# used when calling `trainer.fit`
return self.model(*args, **kwargs)
else:
# used when calling `trainer.validate`
assert self.lightning_module is not None
return self.lightning_module.validation_step(*args, **kwargs)
def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
with self.precision_plugin.test_step_context():
assert self.lightning_module is not None
return self.lightning_module.test_step(*args, **kwargs)
def predict_step(self, *args, **kwargs) -> STEP_OUTPUT:
with self.precision_plugin.predict_step_context():
assert self.lightning_module is not None
return self.lightning_module.predict_step(*args, **kwargs)
def post_training_step(self) -> None:
assert self.model is not None
assert self.lightning_module is not None
if not self.lightning_module.automatic_optimization:
self.model.require_backward_grad_sync = True
@classmethod
def register_strategies(cls, strategy_registry: Dict) -> None:
strategy_registry.register(
"ddp_find_unused_parameters_false",
cls,
description="DDP Strategy with `find_unused_parameters` as False",
find_unused_parameters=False,
)
def _should_run_deadlock_detection(self) -> bool:
"""Determines whether the plugin will perform process reconciliation in case of errors.
If the environment variable `PL_RECONCILE_PROCESS` is set, run detection regardless of the cluster environment.
By default this is disabled. Otherwise, if the cluster environment creates the processes, allow the scheduler /
parent process to perform the process termination, external to Lightning.
"""
return os.getenv("PL_RECONCILE_PROCESS", "0") == "1" or self._rank_0_has_called_call_children_scripts
def _share_information_to_prevent_deadlock(self) -> None:
self._share_pids()
        # there should be a unique sync_dir per node.
if self.local_rank == 0:
# create a temporary directory used to synchronize processes on deadlock.
self._sync_dir = tempfile.mkdtemp()
sync_dirs = []
global_node_rank_zero = 0
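        # broadcast each node's rank-0 temp dir from that node's first global rank and
        # keep only the directory that belongs to this node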
for _ in range(self.num_nodes):
sync_dirs.append(self.broadcast(self._sync_dir, global_node_rank_zero))
global_node_rank_zero += self.world_size // self.num_nodes
self._sync_dir = sync_dirs[self.node_rank]
def _share_pids(self) -> None:
"""Make all DDP processes aware of all processes pids."""
self.barrier()
pids = self.all_gather(torch.tensor(os.getpid(), device=self.root_device))
pids = pids.cpu().numpy().tolist()
self._pids = pids if isinstance(pids, list) else [pids]
def reconciliate_processes(self, trace: str) -> None:
if self.world_size < 2:
return
if not self._should_run_deadlock_detection():
return
sync_dir = self._sync_dir
if not sync_dir:
rank_zero_warn("Error handling mechanism for deadlock detection is uninitialized. Skipping check.")
return
# The cluster may be configured to periodically purge the `/tmp`
# directory, in which case `sync_dir` may not exist anymore at this
# point. Idempotently create it to ensure its existence.
Path(sync_dir).mkdir(parents=True, exist_ok=True)
# save a file locally.
torch.save(True, os.path.join(sync_dir, f"{self.global_rank}.pl"))
# sleep for a short time
time.sleep(3)
# return if all processes wrote a file in the `sync_dir`.
# todo (tchaton) Add support for non-shared file-system which will fail.
if len(os.listdir(sync_dir)) == (self.world_size // self.num_nodes):
return
if not self._pids:
return
for pid in self._pids:
if pid != os.getpid():
os.kill(pid, signal.SIGKILL)
shutil.rmtree(sync_dir)
raise DeadlockDetectedException(f"DeadLock detected from rank: {self.global_rank} \n {trace}")
def teardown(self) -> None:
log.detail(f"{self.__class__.__name__}: tearing down DDP plugin")
super().teardown()
if isinstance(self.model, DistributedDataParallel):
self.model = self.lightning_module
assert self.model is not None
if self.sync_batchnorm:
self.model = _revert_sync_batchnorm(self.model)
if self.root_device.type == "cuda":
# GPU teardown
log.detail(f"{self.__class__.__name__}: moving model to CPU")
assert self.lightning_module is not None
self.lightning_module.cpu()
# clean up memory
torch.cuda.empty_cache()
| [
"torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager",
"torch.distributed.optim.PostLocalSGDOptimizer",
"torch.distributed.broadcast_object_list",
"torch.nn.parallel.distributed.DistributedDataParallel",
"torch.distributed.get_backend",
"torch.cuda.empty_cache",
"torch.distributed.barrier"
] | 1.7 | alat-rights/pytorch-lightning | a4f1f3dc28982eb6578df62ca92b93f83a2defcc |
1.5 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import numpy as np
from utils.utils import AverageMeter
from tqdm import tqdm
# from utils.visualize import plot_confusion_matrix
from sklearn.metrics import confusion_matrix
class ExperimentRunnerBase:
def __init__(self, args):
# Set the LR Scheduler and Loss Parameters
if args.scheduler == 'plateau':
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,
factor=0.5,
patience=3,
mode='max',
verbose=True)
elif args.scheduler == 'cycle':
self.scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optimizer,
max_lr=args.learning_rate,
steps_per_epoch=len(self.train_loader),
epochs=args.num_epochs)
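        # 'plateau' is stepped on validation accuracy (mode='max') once per validation
        # pass, while 'cycle' is stepped once per training batch (see train())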
self.criterion = torch.nn.CrossEntropyLoss()
self.visualize = args.visualize
if self.visualize:
print("if visualize is true this line will run")
from torch.utils.tensorboard import SummaryWriter
self.writer = SummaryWriter()
# Training specific params
self.args = args
self.num_epochs = args.num_epochs
self.print_every = args.print_every
self.val_every = args.val_every
self.model_dir = args.model_dir
self.save_every = args.save_every
def train(self):
# Setting the variables before starting the training
avg_train_loss = AverageMeter()
avg_train_acc = AverageMeter()
best_val_acc = -np.inf
for epoch in range(self.num_epochs):
avg_train_loss.reset()
avg_train_acc.reset()
# Mini batch loop
for batch_idx, batch in enumerate(tqdm(self.train_loader)):
step = epoch * len(self.train_loader) + batch_idx
# Get the model output for the batch and update the loss and accuracy meters
train_loss, train_acc = self.train_step(batch)
if self.args.scheduler == 'cycle':
self.scheduler.step()
avg_train_loss.update([train_loss.item()])
avg_train_acc.update([train_acc])
# Save the step checkpoint if needed
# if step % self.save_every == 0:
# step_chkpt_path = os.path.join(self.model_dir,
# 'step_chkpt_{}_{}.pth'.format(epoch, step))
# print("Saving the model checkpoint for epoch {} at step {}".format(epoch, step))
# torch.save(self.model.state_dict(), step_chkpt_path)
# Logging and validation check
if step % self.print_every == 0:
print('Epoch {}, batch {}, step {}, '
'loss = {:.4f}, acc = {:.4f}, '
'running averages: loss = {:.4f}, acc = {:.4f}'.format(epoch,
batch_idx,
step,
train_loss.item(),
train_acc,
avg_train_loss.get(),
avg_train_acc.get()))
if step % self.val_every == 0:
val_loss, val_acc = self.val()
print('Val acc = {:.4f}, Val loss = {:.4f}'.format(val_acc, val_loss))
if self.visualize:
self.writer.add_scalar('Val/loss', val_loss, step)
self.writer.add_scalar('Val/acc', val_acc, step)
# Update the save the best validation checkpoint if needed
if val_acc > best_val_acc:
best_val_acc = val_acc
best_chkpt_path = os.path.join(self.model_dir,
'best_ckpt.pth')
torch.save(self.model.state_dict(), best_chkpt_path)
if self.args.scheduler == 'plateau':
self.scheduler.step(val_acc)
if self.visualize:
                    # Log training loss and accuracy to TensorBoard
self.writer.add_scalar('Train/loss', train_loss.item(), step)
self.writer.add_scalar('Train/acc', train_acc, step)
def compute_loss(self, batch):
""" This function is specific to the kind of model we are training and must be implemented """
raise NotImplementedError
def train_step(self, batch):
self.model.train()
self.optimizer.zero_grad()
metrics = self.compute_loss(batch)
metrics['loss'].backward()
self.optimizer.step()
return metrics['loss'], metrics['accuracy']
def load_model_for_eval(self):
chkpt_path = os.path.join(self.model_dir, 'best_ckpt.pth') \
if self.args.eval_checkpoint_path is None else self.args.eval_checkpoint_path
self.model.load_state_dict(torch.load(chkpt_path))
self.model.eval()
@torch.no_grad()
def val(self):
print('VALIDATING:')
avg_val_loss = AverageMeter()
avg_val_acc = AverageMeter()
self.model.eval()
for batch_idx, batch in enumerate(tqdm(self.val_loader)):
metrics = self.compute_loss(batch)
avg_val_acc.update(metrics['correct'].cpu().numpy())
avg_val_loss.update([metrics['loss']])
return avg_val_loss.get(), avg_val_acc.get()
@torch.no_grad()
def infer(self):
self.load_model_for_eval()
avg_test_loss = AverageMeter()
avg_test_acc = AverageMeter()
all_true_labels = []
all_pred_labels = []
all_audio_embeddings = []
all_text_embeddings = []
for batch_idx, batch in enumerate(tqdm(self.test_loader)):
# Get the model output and update the meters
output = self.compute_loss(batch)
avg_test_acc.update(output['correct'].cpu().numpy())
avg_test_loss.update([output['loss']])
# Store the Predictions
all_true_labels.append(batch['label'].cpu())
all_pred_labels.append(output['predicted'].cpu())
all_audio_embeddings.append(output['model_output']['audio_embed'].cpu())
all_text_embeddings.append(output['model_output']['text_embed'].cpu())
# Collect the predictions and embeddings for the full set
all_true_labels = torch.cat(all_true_labels).numpy()
all_pred_labels = torch.cat(all_pred_labels).numpy()
all_audio_embeddings = torch.cat(all_audio_embeddings).numpy()
all_text_embeddings = torch.cat(all_text_embeddings).numpy()
# Save the embeddings and plot the confusion matrix
np.savez_compressed('embeddings.npz',
audio=all_audio_embeddings,
text=all_text_embeddings,
labels=all_true_labels)
# cm = confusion_matrix(all_true_labels, all_pred_labels)
# plot_confusion_matrix(cm, self.test_loader.dataset.labels_list(), normalize=True)
print('Final test acc = {:.4f}, test loss = {:.4f}'.format(avg_test_acc.get(), avg_test_loss.get()))
return avg_test_loss.get(), avg_test_acc.get()
| [
"torch.cat",
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.load",
"torch.utils.tensorboard.SummaryWriter"
] | 1.5.0 | Emmyphung/flexible-input-slu | a2c7fff640b2b4aec830f3ca1b447c28dc506bb4 |
1.7 | import cv2
import numpy as np
import torch
import torch.nn as nn
from . import common
from .agent import Agent
from .controller import PIDController, CustomController
from .controller import ls_circle
STEPS = 5
SPEED_STEPS = 3
COMMANDS = 4
DT = 0.1
CROP_SIZE = 192
PIXELS_PER_METER = 5
def regression_base():
return nn.Sequential(
nn.ConvTranspose2d(640,256,4,2,1,0),
nn.BatchNorm2d(256),
nn.ReLU(True),
nn.ConvTranspose2d(256,128,4,2,1,0),
nn.BatchNorm2d(128),
nn.ReLU(True),
nn.ConvTranspose2d(128,64,4,2,1,0),
nn.BatchNorm2d(64),
nn.ReLU(True))
def spatial_softmax_base():
return nn.Sequential(
nn.BatchNorm2d(640),
nn.ConvTranspose2d(640,256,3,2,1,1),
nn.ReLU(True),
nn.BatchNorm2d(256),
nn.ConvTranspose2d(256,128,3,2,1,1),
nn.ReLU(True),
nn.BatchNorm2d(128),
nn.ConvTranspose2d(128,64,3,2,1,1),
nn.ReLU(True))
class BirdViewPolicyModelSS(common.ResnetBase):
def __init__(self, backbone='resnet18', input_channel=7, n_step=5, all_branch=False, **kwargs):
super().__init__(backbone=backbone, input_channel=input_channel, bias_first=False)
self.deconv = spatial_softmax_base()
self.location_pred = nn.ModuleList([
nn.Sequential(
nn.BatchNorm2d(64),
nn.Conv2d(64,STEPS,1,1,0),
common.SpatialSoftmax(48,48,STEPS)) for i in range(COMMANDS)
])
self.all_branch = all_branch
def forward(self, bird_view, velocity, command):
h = self.conv(bird_view)
b, c, kh, kw = h.size()
# Late fusion for velocity
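        # (tile the scalar speed to a [B, 128, kh, kw] feature map, then concatenate
        # it channel-wise with the backbone features)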
velocity = velocity[...,None,None,None].repeat((1,128,kh,kw))
h = torch.cat((h, velocity), dim=1)
h = self.deconv(h)
location_preds = [location_pred(h) for location_pred in self.location_pred]
location_preds = torch.stack(location_preds, dim=1)
location_pred = common.select_branch(location_preds, command)
if self.all_branch:
return location_pred, location_preds
return location_pred
class BirdViewAgent(Agent):
def __init__(self, steer_points=None, pid=None, gap=5, **kwargs):
super().__init__(**kwargs)
self.speed_control = PIDController(K_P=1.0, K_I=0.1, K_D=2.5)
if steer_points is None:
steer_points = {"1": 3, "2": 2, "3": 2, "4": 2}
if pid is None:
pid = {
"1": {"Kp": 1.0, "Ki": 0.1, "Kd": 0}, # Left
"2": {"Kp": 1.0, "Ki": 0.1, "Kd": 0}, # Right
"3": {"Kp": 0.8, "Ki": 0.1, "Kd": 0}, # Straight
"4": {"Kp": 0.8, "Ki": 0.1, "Kd": 0}, # Follow
}
self.turn_control = CustomController(pid)
self.steer_points = steer_points
self.gap = gap
def run_step(self, observations, teaching=False):
birdview = common.crop_birdview(observations['birdview'], dx=-10)
speed = np.linalg.norm(observations['velocity'])
command = self.one_hot[int(observations['command']) - 1]
with torch.no_grad():
_birdview = self.transform(birdview).to(self.device).unsqueeze(0)
_speed = torch.FloatTensor([speed]).to(self.device)
_command = command.to(self.device).unsqueeze(0)
if self.model.all_branch:
_locations, _ = self.model(_birdview, _speed, _command)
else:
_locations = self.model(_birdview, _speed, _command)
_locations = _locations.squeeze().detach().cpu().numpy()
_map_locations = _locations
# Pixel coordinates.
_locations = (_locations + 1) / 2 * CROP_SIZE
targets = list()
for i in range(STEPS):
pixel_dx, pixel_dy = _locations[i]
pixel_dx = pixel_dx - CROP_SIZE / 2
pixel_dy = CROP_SIZE - pixel_dy
angle = np.arctan2(pixel_dx, pixel_dy)
dist = np.linalg.norm([pixel_dx, pixel_dy]) / PIXELS_PER_METER
targets.append([dist * np.cos(angle), dist * np.sin(angle)])
target_speed = 0.0
for i in range(1, SPEED_STEPS):
pixel_dx, pixel_dy = _locations[i]
prev_dx, prev_dy = _locations[i-1]
dx = pixel_dx - prev_dx
dy = pixel_dy - prev_dy
delta = np.linalg.norm([dx, dy])
target_speed += delta / (PIXELS_PER_METER * self.gap * DT) / (SPEED_STEPS-1)
_cmd = int(observations['command'])
n = self.steer_points.get(str(_cmd), 1)
targets = np.concatenate([[[0, 0]], targets], 0)
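        # fit a circle to the predicted waypoints (least squares), project the n-th
        # waypoint onto it, and steer toward the signed angle between the heading
        # axis and that projected target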
c, r = ls_circle(targets)
closest = common.project_point_to_circle(targets[n], c, r)
v = [1.0, 0.0, 0.0]
w = [closest[0], closest[1], 0.0]
alpha = common.signed_angle(v, w)
steer = self.turn_control.run_step(alpha, _cmd)
throttle = self.speed_control.step(target_speed - speed)
brake = 0.0
if target_speed < 1.0:
steer = 0.0
throttle = 0.0
brake = 1.0
self.debug['locations_birdview'] = _locations[:,::-1].astype(int)
self.debug['target'] = closest
self.debug['target_speed'] = target_speed
control = self.postprocess(steer, throttle, brake)
if teaching:
return control, _map_locations
else:
return control
| [
"torch.cat",
"torch.stack",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.no_grad",
"torch.FloatTensor",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.7.1 | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb |
1.7 | import copy
import math
import time
from pathlib import Path
import random
from kornia.filters import spatial_gradient
from pytorch_msssim import ssim, SSIM
import torch
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
from perception.custom_datasets import DepthDataset
from perception.training.models import createUNet, createUNetResNet
from perception.utils.visualization import display_images_horizontally
def depth_loss_function(y_pred, y_true, theta=0.1, maxDepthVal=1):
# from https://github.com/ialhashim/DenseDepth/blob/ed044069eb99fa06dd4af415d862b3b5cbfab283/loss.py
# and their paper https://arxiv.org/pdf/1812.11941.pdf
# with modifications for pytorch - using libraries pytorch_msssim and kornia
# Point-wise depth
l_depth = torch.mean(torch.abs(y_pred - y_true), dim=-1)
# Edges
d_true = spatial_gradient(y_true)
dx_true = d_true[:, :, 0, :, :]
dy_true = d_true[:, :, 1, :, :]
d_pred = spatial_gradient(y_pred)
dx_pred = d_pred[:, :, 0, :, :]
dy_pred = d_pred[:, :, 1, :, :]
l_edges = torch.mean(torch.abs(dy_pred - dy_true) + torch.abs(dx_pred - dx_true), dim=-1)
# Structural similarity (SSIM) index
l_ssim = torch.clip((1 - ssim(y_true, y_pred, maxDepthVal, nonnegative_ssim=True)) * 0.5, 0, 1)
# Weights
w1 = 1.0
w2 = 1.0
w3 = theta
return (w1 * l_ssim) + (w2 * torch.mean(l_edges)) + (w3 * torch.mean(l_depth))
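# Example (a minimal sketch; the 160x384 shape is an assumption taken from the reshape
# used for visualization in train_model below):
#   y_pred = torch.rand(4, 1, 160, 384)
#   y_true = torch.rand(4, 1, 160, 384)
#   loss = depth_loss_function(y_pred, y_true)  # scalar tensor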
def create_dataloaders(path, validation_set_size, batch_size=32, max_n_instances=None, augment_strategy=None,
num_workers=0, use_transform=None):
dataset = DepthDataset(root_folder=path, max_n_instances=max_n_instances,
augment_strategy=augment_strategy, use_transform=use_transform)
train_size = int((1 - validation_set_size) * len(dataset))
validation_size = len(dataset) - train_size
train_dataset, validation_dataset = random_split(dataset, [train_size, validation_size])
dataloaders = {"train": DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers),
"val": DataLoader(validation_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)}
return dataloaders
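# Example usage (a minimal sketch; the path matches main() below and the 4-tuple
# unpacking mirrors how train_model indexes each batch):
#   loaders = create_dataloaders("data/perception/test1", validation_set_size=0.2,
#                                batch_size=32)
#   rgb_input, rgb_target, depth, rgb_raw = next(iter(loaders["train"]))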
def train_model(model, dataloaders, criterion, optimizer, n_epochs, model_save_path, scheduler=None,
save_model_weights=True, display_img_after_epoch=0):
# determine the computational device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
model.to(device)
best_val_loss = math.inf
best_model_weights = copy.deepcopy(model.state_dict())
start = time.time()
last_time = start
# Tensorboard logging
train_log_path = model_save_path / "logs/train"
val_log_path = model_save_path / "logs/val"
train_log_path.mkdir(parents=True)
val_log_path.mkdir(parents=True)
writer_train = SummaryWriter(model_save_path / "logs/train")
writer_val = SummaryWriter(model_save_path / "logs/val")
for epoch in range(n_epochs):
print('-' * 10)
print('Epoch {}/{}'.format(epoch + 1, n_epochs))
for phase in ["train", "val"]:
if phase == "train":
model.train()
else:
model.eval()
running_loss = 0.0
display_images = None
for i, data in tqdm(enumerate(dataloaders[phase])):
# Get the inputs; data is a list of (RGB, semantic segmentation, depth maps).
                rgb_input = data[0].to(device, dtype=torch.float32)  # TODO: is such a large dtype necessary?
#rgb_target = data[1].to(device, dtype=torch.float32)
depth_image = data[2].to(device, dtype=torch.float32)
rgb_raw = data[3]
# Find the size of rgb_image
input_size = rgb_input.size(0)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
with torch.set_grad_enabled(phase == "train"):
outputs = model(rgb_input)
#depth_image = torch.flatten(depth_image, start_dim=1)
loss = criterion(outputs, depth_image)
# backward + optimize only if in training phase
if phase == "train":
loss.backward()
optimizer.step()
if display_images is None:
# Save image as a numpy array. Used later for displaying predictions.
idx = random.randint(0, input_size-1)
display_images = [rgb_raw.cpu().numpy()[idx].transpose(1, 2, 0),
depth_image.cpu().numpy()[idx].reshape(160, 384),
outputs.detach().cpu().numpy()[idx].reshape(160, 384)]
# statistics
running_loss += loss.item() * input_size
if phase == 'train' and scheduler is not None:
scheduler.step()
dataset_size = len(dataloaders["train"].dataset) if phase == "train" else len(dataloaders["val"].dataset)
epoch_loss = running_loss / dataset_size
print('{} Loss: {:.6f}'.format(phase, epoch_loss))
# deep copy the model
if phase == "val" and epoch_loss < best_val_loss:
print("val loss record low, saving these weights...")
best_val_loss = epoch_loss
best_model_weights = copy.deepcopy(model.state_dict())
writer = writer_train if phase == "train" else writer_val
writer.add_scalar("epoch_loss", epoch_loss, epoch)
display = bool(phase == "val" and display_img_after_epoch)
figtitle = "{} visualization after epoch {}".format(phase, epoch)
subtitles = ["augmented input", "ground truth", "prediction"]
img = display_images_horizontally(display_images, fig_width=10, fig_height=2, display=display,
title=figtitle, subplot_titles=subtitles)
writer.add_image("{} comparison".format(phase), img.transpose(2, 0, 1), epoch)
now = time.time()
time_elapsed = now - last_time
print("Epoch completed in {:.0f}m {:.0f}s".format(
time_elapsed // 60, time_elapsed % 60))
last_time = now
# Save the model
if save_model_weights:
path = model_save_path / "epoch-{}.pt".format(epoch+1)
print("Saving weights to:", path)
torch.save(model.state_dict(), path)
time_elapsed = time.time() - start
print("Training complete in {:.0f}m {:.0f}s".format(
time_elapsed // 60, time_elapsed % 60))
# load the best weights
model.load_state_dict(best_model_weights)
# Save the model
if save_model_weights:
path = model_save_path / "best_weights.pt"
print("Saving best weights to:", path)
torch.save(model.state_dict(), path)
return model
def main():
model_name = "depth_unet_resnet_ssim6"
model_save_path = Path("training_logs/perception") / model_name
validation_set_size = 0.2
max_n_instances = None
batch_size = 42
augment_strategy = "medium"
path = "data/perception/test1"
model_save_path.mkdir(parents=True)
dataloaders = create_dataloaders(path=path, validation_set_size=validation_set_size,
batch_size=batch_size, max_n_instances=max_n_instances,
augment_strategy=augment_strategy, num_workers=0,
use_transform=None) # "midas_large"
save_model_weights = True
display_img_after_epoch = True
n_epochs = 20
#model = createDeepLabv3(outputchannels=len(DEFAULT_CLASSES) + 1, backbone=backbone, pretrained=True)
#model = createUNet()
model = createUNetResNet()
#criterion = torch.nn.MSELoss()
criterion = depth_loss_function
optimizer = optim.Adam(model.parameters(), lr=0.0001)
train_model(model=model, dataloaders=dataloaders, criterion=criterion, optimizer=optimizer,
n_epochs=n_epochs, model_save_path=model_save_path, save_model_weights=save_model_weights,
display_img_after_epoch=display_img_after_epoch)
if __name__ == '__main__':
main()
| [
"torch.utils.data.random_split",
"torch.set_grad_enabled",
"torch.abs",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.mean"
] | 1.7.1 | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb |
0.4 | """
IBN(b)-ResNet, implemented in PyTorch.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNbResNet', 'ibnb_resnet50', 'ibnb_resnet101', 'ibnb_resnet152']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block
from .resnet import ResBottleneck
class IBNbConvBlock(nn.Module):
"""
IBN(b)-ResNet specific convolution block with Instance normalization and ReLU activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
activate : bool, default True
Whether to activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
groups=1,
bias=False,
activate=True):
super(IBNbConvBlock, self).__init__()
self.activate = activate
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
self.inst_norm = nn.InstanceNorm2d(
num_features=out_channels,
affine=True)
if self.activate:
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.inst_norm(x)
if self.activate:
x = self.activ(x)
return x
def ibnb_conv7x7_block(in_channels,
out_channels,
stride=1,
padding=3,
bias=False,
activate=True):
"""
7x7 version of the IBN(b)-ResNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 3
Padding value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
activate : bool, default True
Whether to activate the convolution block.
"""
return IBNbConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
stride=stride,
padding=padding,
bias=bias,
activate=activate)
class IBNbResUnit(nn.Module):
"""
IBN(b)-ResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
use_inst_norm : bool
Whether to use instance normalization.
"""
def __init__(self,
in_channels,
out_channels,
stride,
use_inst_norm):
super(IBNbResUnit, self).__init__()
self.use_inst_norm = use_inst_norm
self.resize_identity = (in_channels != out_channels) or (stride != 1)
self.body = ResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
conv1_stride=False)
if self.resize_identity:
self.identity_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
activate=False)
if self.use_inst_norm:
self.inst_norm = nn.InstanceNorm2d(
num_features=out_channels,
affine=True)
self.activ = nn.ReLU(inplace=True)
def forward(self, x):
if self.resize_identity:
identity = self.identity_conv(x)
else:
identity = x
x = self.body(x)
x = x + identity
if self.use_inst_norm:
x = self.inst_norm(x)
x = self.activ(x)
return x
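# Design note: this post-residual InstanceNorm follows the IBN(b) recipe.
# IBNbResNet below enables it only for the last unit of the first two stages,
# keeping appearance invariance in shallow layers while deeper units rely on BN.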
class IBNbResInitBlock(nn.Module):
"""
IBN(b)-ResNet specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(IBNbResInitBlock, self).__init__()
self.conv = ibnb_conv7x7_block(
in_channels=in_channels,
out_channels=out_channels,
stride=2)
self.pool = nn.MaxPool2d(
kernel_size=3,
stride=2,
padding=1)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
class IBNbResNet(nn.Module):
"""
IBN(b)-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(IBNbResNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", IBNbResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) and (i != 0) else 1
use_inst_norm = (i < 2) and (j == len(channels_per_stage) - 1)
stage.add_module("unit{}".format(j + 1), IBNbResUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
use_inst_norm=use_inst_norm))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_ibnbresnet(blocks,
model_name=None,
pretrained=False,
root=os.path.join('~', '.torch', 'models'),
**kwargs):
"""
Create IBN(b)-ResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
else:
raise ValueError("Unsupported IBN(b)-ResNet with number of blocks: {}".format(blocks))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = IBNbResNet(
channels=channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if not model_name:
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def ibnb_resnet50(**kwargs):
"""
IBN(b)-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_ibnbresnet(blocks=50, model_name="ibnb_resnet50", **kwargs)
def ibnb_resnet101(**kwargs):
"""
IBN(b)-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_ibnbresnet(blocks=101, model_name="ibnb_resnet101", **kwargs)
def ibnb_resnet152(**kwargs):
"""
IBN(b)-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_ibnbresnet(blocks=152, model_name="ibnb_resnet152", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
from torch.autograd import Variable
pretrained = False
models = [
ibnb_resnet50,
ibnb_resnet101,
ibnb_resnet152,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != ibnb_resnet50 or weight_count == 25558568)
assert (model != ibnb_resnet101 or weight_count == 44550696)
assert (model != ibnb_resnet152 or weight_count == 60194344)
x = Variable(torch.randn(1, 3, 224, 224))
y = net(x)
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
| [
"torch.nn.Linear",
"torch.nn.init.kaiming_uniform_",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.randn"
] | 0.4.0 | raijinspecial/imgclsmob | c87c0942420876941868c016211073dec4392e4d |
0.4 | """
PeleeNet, implemented in PyTorch.
Original paper: 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882.
"""
__all__ = ['PeleeNet', 'peleenet']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block, Concurrent
class PeleeBranch1(nn.Module):
"""
PeleeNet branch type 1 block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of intermediate channels.
stride : int or tuple/list of 2 int, default 1
Strides of the second convolution.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
stride=1):
super(PeleeBranch1, self).__init__()
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels,
stride=stride)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class PeleeBranch2(nn.Module):
"""
PeleeNet branch type 2 block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of intermediate channels.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels):
super(PeleeBranch2, self).__init__()
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels)
self.conv2 = conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels)
self.conv3 = conv3x3_block(
in_channels=out_channels,
out_channels=out_channels)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class StemBlock(nn.Module):
"""
PeleeNet stem block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(StemBlock, self).__init__()
mid1_channels = out_channels // 2
mid2_channels = out_channels * 2
self.first_conv = conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=2)
self.branches = Concurrent()
self.branches.add_module("branch1", PeleeBranch1(
in_channels=out_channels,
out_channels=out_channels,
mid_channels=mid1_channels,
stride=2))
self.branches.add_module("branch2", nn.MaxPool2d(
kernel_size=2,
stride=2,
padding=0))
self.last_conv = conv1x1_block(
in_channels=mid2_channels,
out_channels=out_channels)
def forward(self, x):
x = self.first_conv(x)
x = self.branches(x)
x = self.last_conv(x)
return x
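# Channel bookkeeping: Concurrent concatenates the two branch outputs
# (PeleeBranch1 and the max-pool path, out_channels each) along the channel
# dim, so last_conv receives mid2_channels = out_channels * 2 input channels.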
class DenseBlock(nn.Module):
"""
PeleeNet dense block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bottleneck_size : int
Bottleneck width.
"""
def __init__(self,
in_channels,
out_channels,
bottleneck_size):
super(DenseBlock, self).__init__()
inc_channels = (out_channels - in_channels) // 2
mid_channels = inc_channels * bottleneck_size
self.branch1 = PeleeBranch1(
in_channels=in_channels,
out_channels=inc_channels,
mid_channels=mid_channels)
self.branch2 = PeleeBranch2(
in_channels=in_channels,
out_channels=inc_channels,
mid_channels=mid_channels)
def forward(self, x):
x1 = self.branch1(x)
x2 = self.branch2(x)
x = torch.cat((x, x1, x2), dim=1)
return x
class TransitionBlock(nn.Module):
"""
PeleeNet's transition block, like in DenseNet, but with an ordinary convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels):
super(TransitionBlock, self).__init__()
self.conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels)
self.pool = nn.AvgPool2d(
kernel_size=2,
stride=2,
padding=0)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
class PeleeNet(nn.Module):
"""
PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,'
https://arxiv.org/abs/1804.06882.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck_sizes : list of int
Bottleneck sizes for each stage.
dropout_rate : float, default 0.5
Parameter of Dropout layer. Fraction of the input units to drop.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck_sizes,
dropout_rate=0.5,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(PeleeNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", StemBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
bottleneck_size = bottleneck_sizes[i]
stage = nn.Sequential()
if i != 0:
stage.add_module("trans{}".format(i + 1), TransitionBlock(
in_channels=in_channels,
out_channels=in_channels))
for j, out_channels in enumerate(channels_per_stage):
stage.add_module("unit{}".format(j + 1), DenseBlock(
in_channels=in_channels,
out_channels=out_channels,
bottleneck_size=bottleneck_size))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("final_block", conv1x1_block(
in_channels=in_channels,
out_channels=in_channels))
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Sequential()
self.output.add_module('dropout', nn.Dropout(p=dropout_rate))
self.output.add_module('fc', nn.Linear(
in_features=in_channels,
out_features=num_classes))
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_peleenet(model_name=None,
pretrained=False,
root=os.path.join('~', '.torch', 'models'),
**kwargs):
"""
Create PeleeNet model with specific parameters.
Parameters:
----------
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
init_block_channels = 32
growth_rate = 32
layers = [3, 4, 8, 6]
bottleneck_sizes = [1, 2, 4, 4]
from functools import reduce
channels = reduce(
lambda xi, yi: xi + [reduce(
lambda xj, yj: xj + [xj[-1] + yj],
[growth_rate] * yi,
[xi[-1][-1]])[1:]],
layers,
[[init_block_channels]])[1:]
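# The nested reduce builds cumulative per-unit output channels. A more
# readable equivalent (a sketch producing the same values):
#   channels, c = [], init_block_channels
#   for n_units in layers:
#       stage = []
#       for _ in range(n_units):
#           c += growth_rate
#           stage.append(c)
#       channels.append(stage)
# yielding [[64, 96, 128], [160, ..., 256], [288, ..., 512], [544, ..., 704]].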
net = PeleeNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck_sizes=bottleneck_sizes,
**kwargs)
if pretrained:
if not model_name:
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def peleenet(**kwargs):
"""
PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,'
https://arxiv.org/abs/1804.06882.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_peleenet(model_name="peleenet", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
from torch.autograd import Variable
pretrained = False
models = [
peleenet,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != peleenet or weight_count == 2802248)
x = Variable(torch.randn(1, 3, 224, 224))
y = net(x)
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.init.kaiming_uniform_",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.init.constant_",
"torch.randn"
] | 0.4.0 | raijinspecial/imgclsmob | c5d3ab207a6304f1343e4394f0467bdc7403a72a |
1.1 | import os
import numpy as np
import torch
from torch.utils.data import Dataset
from common.laserscan import LaserScan, SemLaserScan
EXTENSIONS_SCAN = ['.bin']
EXTENSIONS_LABEL = ['.label']
def is_scan(filename):
return any(filename.endswith(ext) for ext in EXTENSIONS_SCAN)
def is_label(filename):
return any(filename.endswith(ext) for ext in EXTENSIONS_LABEL)
class SemanticKitti(Dataset):
def __init__(self, root, # directory where data is
sequences, # sequences for this data (e.g. [1,3,4,6])
labels, # label dict: (e.g 10: "car")
color_map, # colors dict bgr (e.g 10: [255, 0, 0])
learning_map, # classes to learn (0 to N-1 for xentropy)
learning_map_inv, # inverse of previous (recover labels)
sensor, # sensor to parse scans from
max_points=150000, # max number of points present in dataset
gt=True): # send ground truth?
# save details
self.root = os.path.join(root, "sequences")
self.sequences = sequences
self.labels = labels
self.color_map = color_map
self.learning_map = learning_map
self.learning_map_inv = learning_map_inv
self.sensor = sensor
self.sensor_img_H = sensor["img_prop"]["height"]
self.sensor_img_W = sensor["img_prop"]["width"]
self.sensor_img_means = torch.tensor(sensor["img_means"],
dtype=torch.float)
self.sensor_img_stds = torch.tensor(sensor["img_stds"],
dtype=torch.float)
self.sensor_fov_up = sensor["fov_up"]
self.sensor_fov_down = sensor["fov_down"]
self.max_points = max_points
self.gt = gt
# get number of classes (can't be len(self.learning_map) because there
# are multiple repeated entries, so the number that matters is how many
# there are for the xentropy)
self.nclasses = len(self.learning_map_inv)
# sanity checks
# make sure directory exists
if os.path.isdir(self.root):
print("Sequences folder exists! Using sequences from %s" % self.root)
else:
raise ValueError("Sequences folder doesn't exist! Exiting...")
# make sure labels is a dict
assert (isinstance(self.labels, dict))
# make sure color_map is a dict
assert (isinstance(self.color_map, dict))
# make sure learning_map is a dict
assert (isinstance(self.learning_map, dict))
# make sure sequences is a list
assert (isinstance(self.sequences, list))
# placeholder for filenames
self.scan_files = []
self.label_files = []
# fill in with names, checking that all sequences are complete
for seq in self.sequences:
# to string
seq = '{0:02d}'.format(int(seq))
print("parsing seq {}".format(seq))
# get paths for each
scan_path = os.path.join(self.root, seq, "velodyne")
label_path = os.path.join(self.root, seq, "labels")
# get files
scan_files = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(scan_path)) for f in fn if is_scan(f)]
label_files = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(label_path)) for f in fn if is_label(f)]
# check all scans have labels
if self.gt:
assert (len(scan_files) == len(label_files))
# extend list
self.scan_files.extend(scan_files)
self.label_files.extend(label_files)
# sort for correspondence
self.scan_files.sort()
self.label_files.sort()
print("Using {} scans from sequences {}".format(len(self.scan_files),
self.sequences))
def __getitem__(self, index):
# get item in tensor shape
scan_file = self.scan_files[index]
if self.gt:
label_file = self.label_files[index]
# open a semantic laserscan
if self.gt:
scan = SemLaserScan(self.color_map,
project=True,
H=self.sensor_img_H,
W=self.sensor_img_W,
fov_up=self.sensor_fov_up,
fov_down=self.sensor_fov_down)
else:
scan = LaserScan(project=True,
H=self.sensor_img_H,
W=self.sensor_img_W,
fov_up=self.sensor_fov_up,
fov_down=self.sensor_fov_down)
# open and obtain scan
scan.open_scan(scan_file)
if self.gt:
scan.open_label(label_file)
# map unused classes to used classes (also for projection)
scan.sem_label = self.map(scan.sem_label, self.learning_map)
scan.proj_sem_label = self.map(scan.proj_sem_label, self.learning_map)
# make a tensor of the uncompressed data (with the max num points)
unproj_n_points = scan.points.shape[0]
unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
unproj_remissions = torch.full([self.max_points], -1.0, dtype=torch.float)
unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
if self.gt:
unproj_labels = torch.full([self.max_points], -1.0, dtype=torch.int32)
unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
else:
unproj_labels = []
# get points and labels
proj_range = torch.from_numpy(scan.proj_range).clone()
proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
proj_remission = torch.from_numpy(scan.proj_remission).clone()
proj_mask = torch.from_numpy(scan.proj_mask)
if self.gt:
proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
proj_labels = proj_labels * proj_mask
else:
proj_labels = []
proj_x = torch.full([self.max_points], -1, dtype=torch.long)
proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
proj_y = torch.full([self.max_points], -1, dtype=torch.long)
proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
proj = torch.cat([proj_range.unsqueeze(0).clone(),
proj_xyz.clone().permute(2, 0, 1),
proj_remission.unsqueeze(0).clone()])
proj = (proj - self.sensor_img_means[:, None, None]
) / self.sensor_img_stds[:, None, None]
proj = proj * proj_mask.float()
# get name and sequence
path_norm = os.path.normpath(scan_file)
path_split = path_norm.split(os.sep)
path_seq = path_split[-3]
path_name = path_split[-1].replace(".bin", ".label")
# print("path_norm: ", path_norm)
# print("path_seq", path_seq)
# print("path_name", path_name)
# return
return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points
def __len__(self):
return len(self.scan_files)
@staticmethod
def map(label, mapdict):
# put label from original values to xentropy
# or vice-versa, depending on dictionary values
# make learning map a lookup table
maxkey = 0
for key, data in mapdict.items():
if isinstance(data, list):
nel = len(data)
else:
nel = 1
if key > maxkey:
maxkey = key
# +100 hack making lut bigger just in case there are unknown labels
if nel > 1:
lut = np.zeros((maxkey + 100, nel), dtype=np.int32)
else:
lut = np.zeros((maxkey + 100), dtype=np.int32)
for key, data in mapdict.items():
try:
lut[key] = data
except IndexError:
print("Wrong key ", key)
# do the mapping
return lut[label]
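# Worked example: with mapdict={0: 0, 10: 1}, maxkey is 10, so the lut has
# size 110, and map(np.array([10, 0, 10]), mapdict) returns array([1, 0, 1]);
# labels outside the dict fall into the zero-initialized padding of the lut.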
class Parser:
# standard conv, BN, relu
def __init__(self,
root, # directory for data
train_sequences, # sequences to train
valid_sequences, # sequences to validate.
test_sequences, # sequences to test (if none, don't get)
labels, # labels in data
color_map, # color for each label
learning_map, # mapping for training labels
learning_map_inv, # recover labels from xentropy
sensor, # sensor to use
max_points, # max points in each scan in entire dataset
batch_size, # batch size for train and val
workers, # threads to load data
gt=True, # get gt?
shuffle_train=True): # shuffle training set?
super(Parser, self).__init__()
# if I am training, get the dataset
self.root = root
self.train_sequences = train_sequences
self.valid_sequences = valid_sequences
self.test_sequences = test_sequences
self.labels = labels
self.color_map = color_map
self.learning_map = learning_map
self.learning_map_inv = learning_map_inv
self.sensor = sensor
self.max_points = max_points
self.batch_size = batch_size
self.workers = workers
self.gt = gt
self.shuffle_train = shuffle_train
# number of classes that matters is the one for xentropy
self.nclasses = len(self.learning_map_inv)
# Data loading code
self.train_dataset = SemanticKitti(root=self.root,
sequences=self.train_sequences,
labels=self.labels,
color_map=self.color_map,
learning_map=self.learning_map,
learning_map_inv=self.learning_map_inv,
sensor=self.sensor,
max_points=max_points,
gt=self.gt)
self.trainloader = torch.utils.data.DataLoader(self.train_dataset,
batch_size=self.batch_size,
shuffle=self.shuffle_train,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.trainloader) > 0
self.trainiter = iter(self.trainloader)
self.valid_dataset = SemanticKitti(root=self.root,
sequences=self.valid_sequences,
labels=self.labels,
color_map=self.color_map,
learning_map=self.learning_map,
learning_map_inv=self.learning_map_inv,
sensor=self.sensor,
max_points=max_points,
gt=self.gt)
self.validloader = torch.utils.data.DataLoader(self.valid_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.validloader) > 0
self.validiter = iter(self.validloader)
if self.test_sequences:
self.test_dataset = SemanticKitti(root=self.root,
sequences=self.test_sequences,
labels=self.labels,
color_map=self.color_map,
learning_map=self.learning_map,
learning_map_inv=self.learning_map_inv,
sensor=self.sensor,
max_points=max_points,
gt=False)
self.testloader = torch.utils.data.DataLoader(self.test_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.testloader) > 0
self.testiter = iter(self.testloader)
def get_train_batch(self):
scans = next(self.trainiter)
return scans
def get_train_set(self):
return self.trainloader
def get_valid_batch(self):
scans = next(self.validiter)
return scans
def get_valid_set(self):
return self.validloader
def get_test_batch(self):
scans = next(self.testiter)
return scans
def get_test_set(self):
return self.testloader
def get_train_size(self):
return len(self.trainloader)
def get_valid_size(self):
return len(self.validloader)
def get_test_size(self):
return len(self.testloader)
def get_n_classes(self):
return self.nclasses
def get_original_class_string(self, idx):
return self.labels[idx]
def get_xentropy_class_string(self, idx):
return self.labels[self.learning_map_inv[idx]]
def to_original(self, label):
# put label in original values
return SemanticKitti.map(label, self.learning_map_inv)
def to_xentropy(self, label):
# put label in xentropy values
return SemanticKitti.map(label, self.learning_map)
def to_color(self, label):
# put label in original values
label = SemanticKitti.map(label, self.learning_map_inv)
# put label in color
return SemanticKitti.map(label, self.color_map)
| [
"torch.from_numpy",
"torch.full",
"torch.tensor",
"torch.utils.data.DataLoader"
] | 1.1.0 | andrewkouri/lidar-bonnetal | a0b5c6aba530701084ac66a02532689ed580f934 |
1.4 | """Test defintion common to CPU and CUDA"""
import torch
import torchaudio.functional as F
from parameterized import parameterized
from scipy import signal
from torchaudio_unittest import common_utils
class Lfilter(common_utils.TestBaseMixin):
def test_simple(self):
"""
Create a very basic signal, then apply a simple 4th-order delay filter.
The output should be the same as the input, but shifted by three samples.
"""
torch.random.manual_seed(42)
waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)
b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)
output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)
self.assertEqual(output_waveform[:, 3:], waveform[:, 0:-3], atol=1e-5, rtol=1e-5)
def test_clamp(self):
input_signal = torch.ones(1, 44100 * 1, dtype=self.dtype, device=self.device)
b_coeffs = torch.tensor([1, 0], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, -0.95], dtype=self.dtype, device=self.device)
output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=True)
assert output_signal.max() <= 1
output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=False)
assert output_signal.max() > 1
@parameterized.expand([
((44100,),),
((3, 44100),),
((2, 3, 44100),),
((1, 2, 3, 44100),)
])
def test_shape(self, shape):
torch.random.manual_seed(42)
waveform = torch.rand(*shape, dtype=self.dtype, device=self.device)
b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)
output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)
assert shape == waveform.size() == output_waveform.size()
def test_9th_order_filter_stability(self):
"""
Validate the precision of lfilter against the reference scipy implementation when using a high-order filter.
The reference implementation uses cascaded second-order sections, so it is more numerically accurate.
"""
# create an impulse signal
x = torch.zeros(1024, dtype=self.dtype, device=self.device)
x[0] = 1
# get target impulse response
sos = signal.butter(9, 850, 'hp', fs=22050, output='sos')
y = torch.from_numpy(signal.sosfilt(sos, x.cpu().numpy())).to(self.dtype).to(self.device)
# get lfilter coefficients
b, a = signal.butter(9, 850, 'hp', fs=22050, output='ba')
b, a = torch.from_numpy(b).to(self.dtype).to(self.device), torch.from_numpy(
a).to(self.dtype).to(self.device)
# predict impulse response
yhat = F.lfilter(x, a, b, False)
self.assertEqual(yhat, y, atol=1e-4, rtol=1e-5)
class Spectrogram(common_utils.TestBaseMixin):
@parameterized.expand([(0., ), (1., ), (2., ), (3., )])
def test_grad_at_zero(self, power):
"""The gradient of power spectrogram should not be nan but zero near x=0
https://github.com/pytorch/audio/issues/993
"""
x = torch.zeros(1, 22050, requires_grad=True)
spec = F.spectrogram(
x,
pad=0,
window=None,
n_fft=2048,
hop_length=None,
win_length=None,
power=power,
normalized=False,
)
spec.sum().backward()
assert not x.grad.isnan().sum()
| [
"torch.zeros",
"torch.rand",
"torch.ones",
"torch.random.manual_seed",
"torch.from_numpy",
"torch.tensor"
] | 1.4.0 | prarabdh9909/audio | 6bad3a66a7a1c7cc05755e9ee5931b7391d2b94c |
1.0 | """
This includes: LossComputeBase, the standard NMTLossCompute, and
utilities for sharded loss computation.
"""
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
import onmt
from onmt.modules.sparse_losses import SparsemaxLoss
from onmt.modules.sparse_activations import LogSparsemax
def build_loss_compute(model, tgt_field, opt, train=True):
"""
Returns a LossCompute subclass which wraps around an nn.Module subclass
(such as nn.NLLLoss) which defines the loss criterion. The LossCompute
object allows this loss to be computed in shards and passes the relevant
data to a Statistics object which handles training/validation logging.
Currently, the NMTLossCompute class handles all loss computation except
for when using a copy mechanism.
"""
device = torch.device("cuda" if onmt.utils.misc.use_gpu(opt) else "cpu")
padding_idx = tgt_field.vocab.stoi[tgt_field.pad_token]
unk_idx = tgt_field.vocab.stoi[tgt_field.unk_token]
if opt.copy_attn:
criterion = onmt.modules.CopyGeneratorLoss(
len(tgt_field.vocab), opt.copy_attn_force,
unk_index=unk_idx, ignore_index=padding_idx
)
elif opt.label_smoothing > 0 and train:
criterion = LabelSmoothingLoss(
opt.label_smoothing, len(tgt_field.vocab), ignore_index=padding_idx
)
elif isinstance(model.generator[-1], LogSparsemax):
criterion = SparsemaxLoss(ignore_index=padding_idx, reduction='sum')
else:
criterion = nn.NLLLoss(ignore_index=padding_idx, reduction='sum')
# if the loss function operates on vectors of raw logits instead of
# probabilities, only the first part of the generator needs to be
# passed to the NMTLossCompute. At the moment, the only supported
# loss function of this kind is the sparsemax loss.
use_raw_logits = isinstance(criterion, SparsemaxLoss)
loss_gen = model.generator[0] if use_raw_logits else model.generator
if opt.copy_attn:
compute = onmt.modules.CopyGeneratorLossCompute(
criterion, loss_gen, tgt_field.vocab, opt.copy_loss_by_seqlength
)
else:
compute = NMTLossCompute(criterion, loss_gen)
compute.to(device)
return compute
class LossComputeBase(nn.Module):
"""
Class for managing efficient loss computation. Handles
sharding next step predictions and accumulating multiple
loss computations.
Users can implement their own loss computation strategy by making
subclass of this one. Users need to implement the _compute_loss()
and make_shard_state() methods.
Args:
criterion (:obj:`nn.Module`) :
loss criterion, e.g. :obj:`nn.NLLLoss`, used to compute the loss.
generator (:obj:`nn.Module`) :
module that maps the output of the decoder to a
distribution over the target vocabulary.
"""
def __init__(self, criterion, generator):
super(LossComputeBase, self).__init__()
self.criterion = criterion
self.generator = generator
@property
def padding_idx(self):
return self.criterion.ignore_index
def _make_shard_state(self, batch, output, range_, attns=None):
"""
Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own _compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model.
"""
raise NotImplementedError
def _compute_loss(self, batch, output, target, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
**kwargs(optional): additional info for computing loss.
"""
raise NotImplementedError
def __call__(self,
batch,
output,
attns,
normalization=1.0,
shard_size=0,
trunc_start=0,
trunc_size=None):
"""Compute the forward loss, possibly in shards in which case this
method also runs the backward pass and returns ``None`` as the loss
value.
Also supports truncated BPTT for long sequences by taking a
range in the decoder output sequence to back propagate in.
Range is from `(trunc_start, trunc_start + trunc_size)`.
Note sharding is an exact efficiency trick to relieve memory
required for the generation buffers. Truncation is an
approximate efficiency trick to relieve the memory required
in the RNN buffers.
Args:
batch (batch) : batch of labeled examples
output (:obj:`FloatTensor`) :
output of decoder model `[tgt_len x batch x hidden]`
attns (dict) : dictionary of attention distributions
`[tgt_len x batch x src_len]`
normalization: Optional normalization factor.
shard_size (int) : maximum number of examples in a shard
trunc_start (int) : starting position of truncation window
trunc_size (int) : length of truncation window
Returns:
A tuple with the loss and a :obj:`onmt.utils.Statistics` instance.
"""
if trunc_size is None:
trunc_size = batch.tgt.size(0) - trunc_start
trunc_range = (trunc_start, trunc_start + trunc_size)
shard_state = self._make_shard_state(batch, output, trunc_range, attns)
if shard_size == 0:
loss, stats = self._compute_loss(batch, **shard_state)
return loss / float(normalization), stats
batch_stats = onmt.utils.Statistics()
for shard in shards(shard_state, shard_size):
loss, stats = self._compute_loss(batch, **shard)
loss.div(float(normalization)).backward()
batch_stats.update(stats)
return None, batch_stats
def _stats(self, loss, scores, target):
"""
Args:
loss (:obj:`FloatTensor`): the loss computed by the loss criterion.
scores (:obj:`FloatTensor`): a score for each possible output
target (:obj:`FloatTensor`): true targets
Returns:
:obj:`onmt.utils.Statistics` : statistics for this batch.
"""
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target).masked_select(non_padding).sum().item()
num_non_padding = non_padding.sum().item()
return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)
def _bottle(self, _v):
return _v.view(-1, _v.size(2))
def _unbottle(self, _v, batch_size):
return _v.view(-1, batch_size, _v.size(1))
class LabelSmoothingLoss(nn.Module):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-100):
assert 0.0 < label_smoothing <= 1.0
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__()
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
def forward(self, output, target):
"""
output (FloatTensor): batch_size x n_classes
target (LongTensor): batch_size
"""
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='sum')
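# Worked example (a sketch): with tgt_vocab_size=5 and label_smoothing=0.1,
# each non-target class gets 0.1 / (5 - 2) ~ 0.033, the padding index gets
# probability 0, and the target class gets confidence 0.9.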
class NMTLossCompute(LossComputeBase):
"""
Standard NMT Loss Computation.
"""
def __init__(self, criterion, generator, normalization="sents"):
super(NMTLossCompute, self).__init__(criterion, generator)
def _make_shard_state(self, batch, output, range_, attns=None):
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1], :, 0],
}
def _compute_loss(self, batch, output, target):
bottled_output = self._bottle(output)
scores = self.generator(bottled_output)
gtruth = target.view(-1)
loss = self.criterion(scores, gtruth)
stats = self._stats(loss.clone(), scores, gtruth)
return loss, stats
def filter_shard_state(state, shard_size=None):
for k, v in state.items():
if shard_size is None:
yield k, v
# when sharding, split each non-None tensor into shard_size chunks
elif v is not None:
v_split = []
if isinstance(v, torch.Tensor):
for v_chunk in torch.split(v, shard_size):
v_chunk = v_chunk.data.clone()
v_chunk.requires_grad = v.requires_grad
v_split.append(v_chunk)
yield k, (v, v_split)
def shards(state, shard_size, eval_only=False):
"""
Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation.
"""
if eval_only:
yield filter_shard_state(state)
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state, shard_size))
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, [v_chunk for v_chunk in v_split])
for k, (_, v_split) in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
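# At this point the trainer has consumed every shard and called backward()
# on each shard's loss; gradients therefore live on the detached v_chunk
# copies rather than on the original state tensors.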
# Assumed backprop'd
variables = []
for k, (v, v_split) in non_none.items():
if isinstance(v, torch.Tensor) and state[k].requires_grad:
variables.extend(zip(torch.split(state[k], shard_size),
[v_chunk.grad for v_chunk in v_split]))
inputs, grads = zip(*variables)
torch.autograd.backward(inputs, grads)
| [
"torch.nn.NLLLoss",
"torch.autograd.backward",
"torch.split",
"torch.nn.functional.kl_div",
"torch.full"
] | 1.0 | sajastu/abs-summarization | 9d4b35b457cfd617965ed1fab68c173c98333439 |
1.0 | import copy
import unittest
import math
import torch
import onmt
import onmt.inputters
import onmt.opts
from onmt.model_builder import build_embeddings, \
build_encoder, build_decoder
from onmt.encoders.image_encoder import ImageEncoder
from onmt.encoders.audio_encoder import AudioEncoder
from onmt.utils.parse import ArgumentParser
parser = ArgumentParser(description='train.py')
onmt.opts.model_opts(parser)
onmt.opts.train_opts(parser)
# -data option is required, but not used in this test, so dummy.
opt = parser.parse_known_args(['-data', 'dummy'])[0]
class TestModel(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestModel, self).__init__(*args, **kwargs)
self.opt = opt
def get_field(self):
src = onmt.inputters.get_fields("text", 0, 0)["src"]
src.base_field.build_vocab([])
return src
def get_batch(self, source_l=3, bsize=1):
# len x batch x nfeat
test_src = torch.ones(source_l, bsize, 1).long()
test_tgt = torch.ones(source_l, bsize, 1).long()
test_length = torch.ones(bsize).fill_(source_l).long()
return test_src, test_tgt, test_length
def get_batch_image(self, tgt_l=3, bsize=1, h=15, w=17):
# batch x c x h x w
test_src = torch.ones(bsize, 3, h, w).float()
test_tgt = torch.ones(tgt_l, bsize, 1).long()
test_length = None
return test_src, test_tgt, test_length
def get_batch_audio(self, tgt_l=7, bsize=3, sample_rate=5500,
window_size=0.03, t=37):
# batch x 1 x nfft x t
nfft = int(math.floor((sample_rate * window_size) / 2) + 1)
test_src = torch.ones(bsize, 1, nfft, t).float()
test_tgt = torch.ones(tgt_l, bsize, 1).long()
test_length = torch.ones(bsize).long().fill_(tgt_l)
return test_src, test_tgt, test_length
def embeddings_forward(self, opt, source_l=3, bsize=1):
'''
Tests if the embeddings work as expected
args:
opt: set of options
source_l: Length of generated input sentence
bsize: Batchsize of generated input
'''
word_field = self.get_field()
emb = build_embeddings(opt, word_field)
test_src, _, __ = self.get_batch(source_l=source_l, bsize=bsize)
if opt.decoder_type == 'transformer':
input = torch.cat([test_src, test_src], 0)
res = emb(input)
compare_to = torch.zeros(source_l * 2, bsize,
opt.src_word_vec_size)
else:
res = emb(test_src)
compare_to = torch.zeros(source_l, bsize, opt.src_word_vec_size)
self.assertEqual(res.size(), compare_to.size())
def encoder_forward(self, opt, source_l=3, bsize=1):
'''
Tests if the encoder works as expected
args:
opt: set of options
source_l: Length of generated input sentence
bsize: Batchsize of generated input
'''
if opt.rnn_size > 0:
opt.enc_rnn_size = opt.rnn_size
word_field = self.get_field()
embeddings = build_embeddings(opt, word_field)
enc = build_encoder(opt, embeddings)
test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
bsize=bsize)
hidden_t, outputs, test_length = enc(test_src, test_length)
# Initialize vectors to compare size with
test_hid = torch.zeros(self.opt.enc_layers, bsize, opt.enc_rnn_size)
test_out = torch.zeros(source_l, bsize, opt.dec_rnn_size)
# Ensure correct sizes and types
self.assertEqual(test_hid.size(),
hidden_t[0].size(),
hidden_t[1].size())
self.assertEqual(test_out.size(), outputs.size())
self.assertEqual(type(outputs), torch.Tensor)
def nmtmodel_forward(self, opt, source_l=3, bsize=1):
"""
Creates a nmtmodel with a custom opt function.
Forwards a testbatch and checks output size.
Args:
opt: Namespace with options
source_l: length of input sequence
bsize: batchsize
"""
if opt.rnn_size > 0:
opt.enc_rnn_size = opt.rnn_size
opt.dec_rnn_size = opt.rnn_size
word_field = self.get_field()
embeddings = build_embeddings(opt, word_field)
enc = build_encoder(opt, embeddings)
embeddings = build_embeddings(opt, word_field, for_encoder=False)
dec = build_decoder(opt, embeddings)
model = onmt.models.model.NMTModel(enc, dec)
test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
bsize=bsize)
outputs, attn = model(test_src, test_tgt, test_length)
outputsize = torch.zeros(source_l - 1, bsize, opt.dec_rnn_size)
# Make sure that output has the correct size and type
self.assertEqual(outputs.size(), outputsize.size())
self.assertEqual(type(outputs), torch.Tensor)
def imagemodel_forward(self, opt, tgt_l=2, bsize=1, h=15, w=17):
"""
Creates an image-to-text nmtmodel with a custom opt function.
Forwards a testbatch and checks output size.
Args:
opt: Namespace with options
source_l: length of input sequence
bsize: batchsize
"""
if opt.encoder_type == 'transformer' or opt.encoder_type == 'cnn':
return
word_field = self.get_field()
enc = ImageEncoder(
opt.enc_layers, opt.brnn, opt.enc_rnn_size, opt.dropout)
embeddings = build_embeddings(opt, word_field, for_encoder=False)
dec = build_decoder(opt, embeddings)
model = onmt.models.model.NMTModel(enc, dec)
test_src, test_tgt, test_length = self.get_batch_image(
h=h, w=w,
bsize=bsize,
tgt_l=tgt_l)
outputs, attn = model(test_src, test_tgt, test_length)
outputsize = torch.zeros(tgt_l - 1, bsize, opt.dec_rnn_size)
# Make sure that output has the correct size and type
self.assertEqual(outputs.size(), outputsize.size())
self.assertEqual(type(outputs), torch.Tensor)
def audiomodel_forward(self, opt, tgt_l=7, bsize=3, t=37):
"""
Creates a speech-to-text nmtmodel with a custom opt function.
Forwards a testbatch and checks output size.
Args:
opt: Namespace with options
source_l: length of input sequence
bsize: batchsize
"""
if opt.encoder_type == 'transformer' or opt.encoder_type == 'cnn':
return
if opt.rnn_type == 'SRU':
return
word_field = self.get_field()
enc = AudioEncoder(opt.rnn_type, opt.enc_layers, opt.dec_layers,
opt.brnn, opt.enc_rnn_size, opt.dec_rnn_size,
opt.audio_enc_pooling, opt.dropout,
opt.sample_rate, opt.window_size)
embeddings = build_embeddings(opt, word_field, for_encoder=False)
dec = build_decoder(opt, embeddings)
model = onmt.models.model.NMTModel(enc, dec)
test_src, test_tgt, test_length = self.get_batch_audio(
bsize=bsize,
sample_rate=opt.sample_rate,
window_size=opt.window_size,
t=t, tgt_l=tgt_l)
outputs, attn = model(test_src, test_tgt, test_length)
outputsize = torch.zeros(tgt_l - 1, bsize, opt.dec_rnn_size)
# Make sure that output has the correct size and type
self.assertEqual(outputs.size(), outputsize.size())
self.assertEqual(type(outputs), torch.Tensor)
def _add_test(param_setting, methodname):
"""
Adds a Test to TestModel according to settings
Args:
param_setting: list of tuples of (param, setting)
methodname: name of the method that gets called
"""
def test_method(self):
opt = copy.deepcopy(self.opt)
if param_setting:
for param, setting in param_setting:
setattr(opt, param, setting)
ArgumentParser.update_model_opts(opt)
getattr(self, methodname)(opt)
if param_setting:
name = 'test_' + methodname + "_" + "_".join(
str(param_setting).split())
else:
name = 'test_' + methodname + '_standard'
setattr(TestModel, name, test_method)
test_method.__name__ = name
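# e.g. _add_test([('rnn_type', 'GRU')], 'nmtmodel_forward') registers a method
# named "test_nmtmodel_forward_[('rnn_type',_'GRU')]" on TestModel.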
'''
TEST PARAMETERS
'''
opt.brnn = False
test_embeddings = [[],
[('decoder_type', 'transformer')]
]
for p in test_embeddings:
_add_test(p, 'embeddings_forward')
tests_encoder = [[],
[('encoder_type', 'mean')],
# [('encoder_type', 'transformer'),
# ('word_vec_size', 16), ('rnn_size', 16)],
[]
]
for p in tests_encoder:
_add_test(p, 'encoder_forward')
tests_nmtmodel = [[('rnn_type', 'GRU')],
[('layers', 10)],
[('input_feed', 0)],
[('decoder_type', 'transformer'),
('encoder_type', 'transformer'),
('src_word_vec_size', 16),
('tgt_word_vec_size', 16),
('rnn_size', 16)],
[('decoder_type', 'transformer'),
('encoder_type', 'transformer'),
('src_word_vec_size', 16),
('tgt_word_vec_size', 16),
('rnn_size', 16),
('position_encoding', True)],
[('coverage_attn', True)],
[('copy_attn', True)],
[('global_attention', 'mlp')],
[('context_gate', 'both')],
[('context_gate', 'target')],
[('context_gate', 'source')],
[('encoder_type', "brnn"),
('brnn_merge', 'sum')],
[('encoder_type', "brnn")],
[('decoder_type', 'cnn'),
('encoder_type', 'cnn')],
[('encoder_type', 'rnn'),
('global_attention', None)],
[('encoder_type', 'rnn'),
('global_attention', None),
('copy_attn', True),
('copy_attn_type', 'general')],
[('encoder_type', 'rnn'),
('global_attention', 'mlp'),
('copy_attn', True),
('copy_attn_type', 'general')],
[],
]
if onmt.models.sru.check_sru_requirement():
# """ Only do SRU test if requirment is safisfied. """
# SRU doesn't support input_feed.
tests_nmtmodel.append([('rnn_type', 'SRU'), ('input_feed', 0)])
for p in tests_nmtmodel:
_add_test(p, 'nmtmodel_forward')
for p in tests_nmtmodel:
_add_test(p, 'imagemodel_forward')
for p in tests_nmtmodel:
p.append(('sample_rate', 5500))
p.append(('window_size', 0.03))
# when reasonable, set audio_enc_pooling to 2
for arg, val in p:
if arg == "layers" and int(val) > 2:
# Need lengths >= audio_enc_pooling**n_layers.
# That condition is unrealistic for large n_layers,
# so leave audio_enc_pooling at 1.
break
else:
p.append(('audio_enc_pooling', '2'))
_add_test(p, 'audiomodel_forward')
| [
"torch.zeros",
"torch.cat",
"torch.ones"
] | 1.0 | sajastu/abs-summarization | 9d4b35b457cfd617965ed1fab68c173c98333439 |
0.4 | # System libs
import os
import argparse
from distutils.version import LooseVersion
from multiprocessing import Queue, Process
# Numerical libs
import numpy as np
import math
import torch
import torch.nn as nn
from scipy.io import loadmat
# Our libs
from mit_semseg.config import cfg
from mit_semseg.dataset import ValDataset
from mit_semseg.models import ModelBuilder, SegmentationModule
from mit_semseg.utils import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices, setup_logger
from mit_semseg.lib.nn import user_scattered_collate, async_copy_to
from mit_semseg.lib.utils import as_numpy
from PIL import Image
from tqdm import tqdm
colors = loadmat('data/color150.mat')['colors']
def visualize_result(data, pred, dir_result):
(img, seg, info) = data
# segmentation
seg_color = colorEncode(seg, colors)
# prediction
pred_color = colorEncode(pred, colors)
# aggregate images and save
im_vis = np.concatenate((img, seg_color, pred_color),
axis=1).astype(np.uint8)
img_name = info.split('/')[-1]
Image.fromarray(im_vis).save(os.path.join(dir_result, img_name.replace('.jpg', '.png')))
def evaluate(segmentation_module, loader, cfg, gpu_id, result_queue):
segmentation_module.eval()
for batch_data in loader:
# process data
batch_data = batch_data[0]
seg_label = as_numpy(batch_data['seg_label'][0])
img_resized_list = batch_data['img_data']
with torch.no_grad():
segSize = (seg_label.shape[0], seg_label.shape[1])
scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])
scores = async_copy_to(scores, gpu_id)
for img in img_resized_list:
feed_dict = batch_data.copy()
feed_dict['img_data'] = img
del feed_dict['img_ori']
del feed_dict['info']
feed_dict = async_copy_to(feed_dict, gpu_id)
# forward pass
scores_tmp = segmentation_module(feed_dict, segSize=segSize)
scores = scores + scores_tmp / len(cfg.DATASET.imgSizes)
_, pred = torch.max(scores, dim=1)
pred = as_numpy(pred.squeeze(0).cpu())
# calculate accuracy and SEND THEM TO MASTER
acc, pix = accuracy(pred, seg_label)
intersection, union = intersectionAndUnion(pred, seg_label, cfg.DATASET.num_class)
result_queue.put_nowait((acc, pix, intersection, union))
# visualization
if cfg.VAL.visualize:
visualize_result(
(batch_data['img_ori'], seg_label, batch_data['info']),
pred,
os.path.join(cfg.DIR, 'result')
)
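# Queue protocol: evaluate() pushes one (acc, pix, intersection, union) tuple
# per image, and the master loop in main() pops exactly num_files tuples.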
def worker(cfg, gpu_id, start_idx, end_idx, result_queue):
torch.cuda.set_device(gpu_id)
# Dataset and Loader
dataset_val = ValDataset(
cfg.DATASET.root_dataset,
cfg.DATASET.list_val,
cfg.DATASET,
start_idx=start_idx, end_idx=end_idx)
loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=cfg.VAL.batch_size,
shuffle=False,
collate_fn=user_scattered_collate,
num_workers=2)
# Network Builders
net_encoder = ModelBuilder.build_encoder(
arch=cfg.MODEL.arch_encoder.lower(),
fc_dim=cfg.MODEL.fc_dim,
weights=cfg.MODEL.weights_encoder)
net_decoder = ModelBuilder.build_decoder(
arch=cfg.MODEL.arch_decoder.lower(),
fc_dim=cfg.MODEL.fc_dim,
num_class=cfg.DATASET.num_class,
weights=cfg.MODEL.weights_decoder,
use_softmax=True)
crit = nn.NLLLoss(ignore_index=-1)
segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
segmentation_module.cuda()
# Main loop
evaluate(segmentation_module, loader_val, cfg, gpu_id, result_queue)
def main(cfg, gpus):
with open(cfg.DATASET.list_val, 'r') as f:
lines = f.readlines()
num_files = len(lines)
num_files_per_gpu = math.ceil(num_files / len(gpus))
pbar = tqdm(total=num_files)
acc_meter = AverageMeter()
intersection_meter = AverageMeter()
union_meter = AverageMeter()
result_queue = Queue(500)
procs = []
for idx, gpu_id in enumerate(gpus):
start_idx = idx * num_files_per_gpu
end_idx = min(start_idx + num_files_per_gpu, num_files)
proc = Process(target=worker, args=(cfg, gpu_id, start_idx, end_idx, result_queue))
print('gpu:{}, start_idx:{}, end_idx:{}'.format(gpu_id, start_idx, end_idx))
proc.start()
procs.append(proc)
# master fetches results
processed_counter = 0
while processed_counter < num_files:
if result_queue.empty():
continue
(acc, pix, intersection, union) = result_queue.get()
acc_meter.update(acc, pix)
intersection_meter.update(intersection)
union_meter.update(union)
processed_counter += 1
pbar.update(1)
for p in procs:
p.join()
# summary
iou = intersection_meter.sum / (union_meter.sum + 1e-10)
for i, _iou in enumerate(iou):
print('class [{}], IoU: {:.4f}'.format(i, _iou))
print('[Eval Summary]:')
print('Mean IoU: {:.4f}, Accuracy: {:.2f}%'
.format(iou.mean(), acc_meter.average()*100))
print('Evaluation Done!')
if __name__ == '__main__':
assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \
'PyTorch>=0.4.0 is required'
parser = argparse.ArgumentParser(
description="PyTorch Semantic Segmentation Validation"
)
parser.add_argument(
"--cfg",
default="config/ade20k-resnet50dilated-ppm_deepsup.yaml",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"--gpus",
default="0",
help="gpus to use, e.g. 0-3 or 0,1,2,3"
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
# cfg.freeze()
logger = setup_logger(distributed_rank=0) # TODO
logger.info("Loaded configuration file {}".format(args.cfg))
logger.info("Running with config:\n{}".format(cfg))
# absolute paths of model weights
cfg.MODEL.weights_encoder = os.path.join(
cfg.DIR, 'encoder_' + cfg.VAL.checkpoint)
cfg.MODEL.weights_decoder = os.path.join(
cfg.DIR, 'decoder_' + cfg.VAL.checkpoint)
assert os.path.exists(cfg.MODEL.weights_encoder) and \
        os.path.exists(cfg.MODEL.weights_decoder), "checkpoint does not exist!"
if not os.path.isdir(os.path.join(cfg.DIR, "result")):
os.makedirs(os.path.join(cfg.DIR, "result"))
# Parse gpu ids
gpus = parse_devices(args.gpus)
gpus = [x.replace('gpu', '') for x in gpus]
gpus = [int(x) for x in gpus]
main(cfg, gpus)
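    # Example invocation (illustrative; the script name is an assumption):
    #   python3 <this_script>.py --cfg config/ade20k-resnet50dilated-ppm_deepsup.yaml --gpus 0,1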
| [
"torch.nn.NLLLoss",
"torch.zeros",
"torch.max",
"torch.no_grad",
"torch.cuda.set_device",
"torch.utils.data.DataLoader"
] | 0.4.1 | chenjun2hao/segmentation.pytorch | a319d0f006559dd58bd853065e6fe79ae8c23791 |
1.7 | # Copyright (c) maiot GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from typing import List, Text, Dict
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
from zenml.steps.trainer import TorchBaseTrainerStep
from zenml.steps.trainer import utils
from zenml.steps.trainer.pytorch_trainers import utils as torch_utils
from zenml.utils import path_utils
class BinaryClassifier(nn.Module):
def __init__(self):
super(BinaryClassifier, self).__init__()
self.layer_1 = nn.Linear(8, 64)
self.layer_2 = nn.Linear(64, 64)
self.layer_out = nn.Linear(64, 1)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.1)
self.batchnorm1 = nn.BatchNorm1d(64)
self.batchnorm2 = nn.BatchNorm1d(64)
def forward(self, inputs):
x = self.relu(self.layer_1(inputs))
x = self.batchnorm1(x)
x = self.relu(self.layer_2(x))
x = self.batchnorm2(x)
x = self.dropout(x)
x = self.layer_out(x)
return x
def binary_acc(y_pred, y_test):
y_pred_tag = torch.round(torch.sigmoid(y_pred))
correct_results_sum = (y_pred_tag == y_test).sum().float()
acc = correct_results_sum / y_test.shape[0]
acc = torch.round(acc * 100)
return acc
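# Minimal usage sketch for binary_acc (illustrative values, not from the
# original code):
#   y_pred = torch.tensor([[0.2], [-1.3], [3.1]])  # raw logits
#   y_test = torch.tensor([[1.0], [0.0], [1.0]])
#   binary_acc(y_pred, y_test)  # -> tensor(100.): all three rounded sigmoids match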
class FeedForwardTrainer(TorchBaseTrainerStep):
def __init__(self,
batch_size: int = 32,
lr: float = 0.0001,
epochs: int = 10,
                 dropout_chance: float = 0.2,
loss: str = 'mse',
metrics: List[str] = None,
hidden_layers: List[int] = None,
hidden_activation: str = 'relu',
last_activation: str = 'sigmoid',
input_units: int = 8,
output_units: int = 1,
device: str = None,
**kwargs):
self.batch_size = batch_size
self.lr = lr
self.epochs = epochs
self.dropout_chance = dropout_chance
self.loss = loss
self.metrics = metrics or []
self.hidden_layers = hidden_layers or [64, 32, 16]
self.hidden_activation = hidden_activation
self.last_activation = last_activation
self.input_units = input_units
self.output_units = output_units
self.device = torch_utils.assign_device(device)
super(FeedForwardTrainer, self).__init__(
batch_size=self.batch_size,
lr=self.lr,
epochs=self.epochs,
dropout_chance=self.dropout_chance,
loss=self.loss,
metrics=self.metrics,
hidden_layers=self.hidden_layers,
hidden_activation=self.hidden_activation,
last_activation=self.last_activation,
input_units=self.input_units,
output_units=self.output_units,
            device=device,
**kwargs)
def input_fn(self,
file_patterns: List[Text]):
dataset = torch_utils.TFRecordTorchDataset(file_patterns,
self.schema)
loader = torch.utils.data.DataLoader(dataset,
batch_size=self.batch_size,
drop_last=True)
return loader
def model_fn(self, train_dataset, eval_dataset):
return BinaryClassifier()
def test_fn(self, model, dataset):
# Activate the evaluation mode
model.eval()
batch_list = []
for x, y, raw in dataset:
# start with an empty batch
batch = {}
# add the raw features with the transformed features and labels
batch.update(x)
batch.update(y)
batch.update(raw)
# finally, add the output of the model
x_batch = torch.cat([v.to(self.device) for v in x.values()], dim=-1)
p = model(x_batch)
if isinstance(p, torch.Tensor):
batch.update({'output': p})
elif isinstance(p, dict):
batch.update(p)
elif isinstance(p, list):
batch.update(
{'output_{}'.format(i): v for i, v in enumerate(p)})
else:
raise TypeError('Unknown output format!')
batch_list.append(batch)
combined_batch = utils.combine_batch_results(batch_list)
return combined_batch
def run_fn(self):
train_split_patterns = [self.input_patterns[split] for split in
self.split_mapping[utils.TRAIN_SPLITS]]
train_dataset = self.input_fn(train_split_patterns)
eval_split_patterns = [self.input_patterns[split] for split in
self.split_mapping[utils.EVAL_SPLITS]]
eval_dataset = self.input_fn(eval_split_patterns)
model = self.model_fn(train_dataset, eval_dataset)
model.to(self.device)
criterion = nn.BCEWithLogitsLoss()
        optimizer = optim.Adam(model.parameters(), lr=self.lr)  # use the configured learning rate
writer = SummaryWriter(self.log_dir)
model.train()
total_count = 0
for e in range(1, self.epochs + 1):
epoch_loss = 0
epoch_acc = 0
step_count = 0
for x, y, _ in train_dataset:
step_count += 1
total_count += 1
x_batch = torch.cat([v.to(self.device) for v in x.values()], dim=-1)
y_batch = torch.cat([v.to(self.device) for v in y.values()], dim=-1)
optimizer.zero_grad()
y_pred = model(x_batch)
loss = criterion(y_pred, y_batch)
acc = binary_acc(y_pred, y_batch)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
if e == 1 and step_count == 1:
writer.add_graph(model, x_batch)
writer.add_scalar('training_loss', loss, total_count)
writer.add_scalar('training_accuracy', acc, total_count)
            print(f'Epoch {e:03}: | Loss: '
f'{epoch_loss / step_count:.5f} | Acc: '
f'{epoch_acc / step_count:.3f}')
# test
for split in self.split_mapping[utils.TEST_SPLITS]:
assert split in self.input_patterns, \
                f'There are no inputs for the split "{split}", which is ' \
                f'used in the {utils.TEST_SPLITS} of the split mapping.'
pattern = self.input_patterns[split]
test_dataset = self.input_fn([pattern])
test_results = self.test_fn(model, test_dataset)
utils.save_test_results(test_results, self.output_patterns[split])
path_utils.create_dir_if_not_exists(self.serving_model_dir)
if path_utils.is_remote(self.serving_model_dir):
temp_model_dir = '__temp_model_dir__'
temp_path = os.path.join(os.getcwd(), temp_model_dir)
if path_utils.is_dir(temp_path):
                raise PermissionError('{} is used as a temp path but it '
                                      'already exists. Please remove it to '
                                      'continue.'.format(temp_path))
torch.save(model, temp_path)
path_utils.copy_dir(temp_path, self.serving_model_dir)
path_utils.rm_dir(temp_path)
else:
torch.save(model, os.path.join(self.serving_model_dir, 'model.pt'))
| [
"torch.nn.Linear",
"torch.round",
"torch.nn.Dropout",
"torch.sigmoid",
"torch.save",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.utils.data.DataLoader",
"torch.nn.BCEWithLogitsLoss",
"torch.utils.tensorboard.SummaryWriter"
] | 1.7.0 | ORG-MARS/zenml | 8ee9a9264397d4e24a34c906e34a443782b189d3 |
0.4 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: speedinghzl02
## Modified by: RainbowSecret
## Microsoft Research
## [email protected]
## Copyright (c) 2018
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import cv2
import pdb
import collections
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path as osp
from PIL import Image, ImageOps, ImageFilter
import random
import torch
import torchvision
from torch.utils import data
import torchvision.transforms as transforms
class LeadBangTrain(data.Dataset):
def __init__(self, root, max_iters=None,
scale=True, mirror=True, ignore_label=255, use_aug=False, network="resnet101"):
self.root = root
# self.crop_h, self.crop_w = crop_size
self.crop_h = 480
self.crop_w = 480
self.img_width = 512
self.img_height = 512
self.scale = scale
self.ignore_label = ignore_label
self.is_mirror = mirror
self.use_aug = use_aug
self.network = network
self.files = []
self.cache_img = {}
self.cache_label = {}
self.item_idx_list = []
for item_idx in range(1, 1463):
self.item_idx_list.append(item_idx)
img_path = 'source/' + str(item_idx) + ".bmp"
label_path = 'label/' + str(item_idx) + ".bmp"
img_file = osp.join(self.root, img_path)
label_file = osp.join(self.root, label_path)
print('label file: ', label_file)
self.files.append({
"img": img_file,
"label": label_file,
"name": str(item_idx),
"weight": 1
})
self.cache_img[item_idx] = cv2.imread(img_file)
self.cache_label[item_idx] = 255 - cv2.imread(label_file, 0)
        print('{} images are loaded!'.format(len(self.files)))
def __len__(self):
return len(self.files)
def generate_scale_label(self, image, label):
f_scale = 0.5 + random.randint(0, 16) / 10.0
image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_LINEAR)
label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation = cv2.INTER_NEAREST)
return image, label
def rescale(self, image, label):
image = cv2.resize(image, (self.img_width, self.img_height))
label = cv2.resize(label, (self.img_width, self.img_height))
return image, label
def id2trainId(self, label, reverse=False):
label_copy = label.copy()
if reverse:
for v, k in self.id_to_trainid.items():
label_copy[label == k] = v
else:
for k, v in self.id_to_trainid.items():
label_copy[label == k] = v
return label_copy
def get_rotate_angle(self, angle_min, angle_max, angle_delta):
count = int((angle_max - angle_min) / angle_delta)
delta = random.random() * (count + 1) * angle_delta
angle = angle_min + delta
if angle < angle_min:
angle = angle_min
if angle > angle_max:
angle = angle_max
return angle
def rotate(self, image, angle, border_value=None):
center = (self.img_width // 2, self.img_height // 2)
M = cv2.getRotationMatrix2D(center, angle, 1)
if border_value is None:
rotated = cv2.warpAffine(image, M, (self.img_width, self.img_height))
else:
rotated = cv2.warpAffine(image, M, (self.img_width, self.img_height), borderValue=(int(border_value),))
return rotated
def get_border_value(self, mat):
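        # Estimate a background grey level by averaging eight pixels near the
        # four corners of `mat`; used as the warpAffine borderValue in rotate().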
r = mat.shape[0]
c = mat.shape[1]
return (mat[1][1] + mat[1][c - 2] + mat[r-2][1] + mat[r-2][c-2] + mat[2][2] + mat[2][c - 3] + mat[r-3][2] + mat[r-3][c-3]) / 8.0
def rotate_img_lb(self, image, label, angle):
b = image[0]
g = image[1]
r = image[2]
# (102.9801, 115.9465, 122.7717)
# b = self.rotate(b, angle, border_value=255 - 102.9801)
# g = self.rotate(g, angle, border_value=255-115.9465)
# r = self.rotate(r, angle, border_value=255-122.7717)
b = self.rotate(b, angle, border_value=self.get_border_value(b))
g = self.rotate(g, angle, border_value=self.get_border_value(g))
r = self.rotate(r, angle, border_value=self.get_border_value(r))
label = self.rotate(label, angle)
image = np.asarray([b, g, r], dtype=np.float32)
ret, label = cv2.threshold(label, 127, 255, cv2.THRESH_BINARY)
return image, label
def adv_img_lb(self, img, lb):
# brightness
img += (random.random() * 10 - 5)
# rotate
angle = self.get_rotate_angle(-180, 180, 5)
img, lb = self.rotate_img_lb(img, lb, angle)
# flip lr
if random.random() < 0.5:
img = img[:,:,::-1]
lb = lb[:,::-1]
# flip ud
if random.random() < 0.5:
img = img[:,::-1,:]
lb = lb[::-1,:]
return img, lb
def __getitem__(self, index):
datafiles = self.files[index]
item_idx = self.item_idx_list[index]
image = self.cache_img[item_idx].copy()
label = self.cache_label[item_idx].copy()
size = image.shape
name = datafiles["name"]
image, label = self.rescale(image, label)
image = np.array(image, dtype=np.float32)
if self.network == "resnet101":
# mean = (102.9801, 115.9465, 122.7717)
# "mean_value_b" : 141.29403686523437,
# "mean_value_g" : 123.58832550048828,
# "mean_value_r" : 172.43679809570312,
mean = (172.43679809570312, 123.58832550048828, 141.29403686523437)
image = image[:,:,::-1]
image -= mean
elif self.network == "mobilenetv2":
mean = (0.485, 0.456, 0.406)
var = (0.229, 0.224, 0.225)
# print("network: {}, mean: {}, var: {}".format(self.network, mean, var))
image = image[:,:,::-1]
image /= 255
image -= mean
image /= var
elif self.network == "wide_resnet38":
mean = (0.41738699, 0.45732192, 0.46886091)
var = (0.25685097, 0.26509955, 0.29067996)
image = image[:,:,::-1]
image /= 255
image -= mean
image /= var
image = image.transpose((2, 0, 1))
image, label = self.adv_img_lb(image, label)
img_h, img_w = label.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(image[:,h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
# [0, 255] => [0, 1]
ret, label = cv2.threshold(label, 127, 1, cv2.THRESH_BINARY)
label = np.array(label, dtype=np.int64)
return image.copy(), label.copy(), np.array(size), name
class LeadBangTest(data.Dataset):
def __init__(self, root, max_iters=None,
scale=True, mirror=True, ignore_label=255, network="resnet101"):
self.root = root
# self.crop_h, self.crop_w = crop_size
self.crop_h = 512
self.crop_w = 512
self.img_width = 512
self.img_height = 512
self.scale = scale
self.ignore_label = ignore_label
self.is_mirror = mirror
self.network = network
self.files = []
self.cache_img = {}
self.cache_label = {}
self.item_idx_list = []
for item_idx in range(1463, 2352):
self.item_idx_list.append(item_idx)
img_path = 'source/' + str(item_idx) + ".bmp"
label_path = 'label/' + str(item_idx) + ".bmp"
img_file = osp.join(self.root, img_path)
label_file = osp.join(self.root, label_path)
self.files.append({
"img": img_file,
"label": label_file,
"name": str(item_idx),
"weight": 1
})
print("label: ", label_file)
self.cache_img[item_idx] = cv2.imread(img_file)
self.cache_label[item_idx] = 255-cv2.imread(label_file, 0)
        print('{} images are loaded!'.format(len(self.files)))
def __len__(self):
return len(self.files)
def id2trainId(self, label, reverse=False):
label_copy = label.copy()
if reverse:
for v, k in self.id_to_trainid.items():
label_copy[label == k] = v
else:
for k, v in self.id_to_trainid.items():
label_copy[label == k] = v
return label_copy
def rescale(self, image, label):
image = cv2.resize(image, (self.img_width, self.img_height))
label = cv2.resize(label, (self.img_width, self.img_height))
return image, label
def __getitem__(self, index):
datafiles = self.files[index]
item_idx = self.item_idx_list[index]
image = self.cache_img[item_idx].copy()
label = self.cache_label[item_idx].copy()
size = image.shape
name = datafiles["name"]
image, label = self.rescale(image, label)
image = np.array(image, dtype=np.float32)
if self.network == "resnet101":
# mean = (102.9801, 115.9465, 122.7717)
mean = (172.43679809570312, 123.58832550048828, 141.29403686523437)
image = image[:,:,::-1]
image -= mean
elif self.network == "mobilenetv2":
mean = (0.485, 0.456, 0.406)
var = (0.229, 0.224, 0.225)
# print("network: {}, mean: {}, var: {}".format(self.network, mean, var))
image = image[:,:,::-1]
image /= 255
image -= mean
image /= var
elif self.network == "wide_resnet38":
mean = (0.41738699, 0.45732192, 0.46886091)
var = (0.25685097, 0.26509955, 0.29067996)
image = image[:,:,::-1]
image /= 255
image -= mean
image /= var
image = image.transpose((2, 0, 1))
img_h, img_w = label.shape
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(image[:,h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
label = np.asarray(label[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
# [0, 255] => [0, 1]
ret, label = cv2.threshold(label, 127, 1, cv2.THRESH_BINARY)
        label = np.asarray(label, dtype=np.int64)
return image.copy(), label.copy(), np.array(size), name
def test_train_leadbang():
dst = LeadBangTrain("./leadbang/")
trainloader = data.DataLoader(dst, batch_size=1, num_workers=0)
with open("./list/cityscapes/trainval.lst") as f:
train_list = f.readlines()
train_list = [x.strip() for x in train_list]
f_w = open("./list/cityscapes/bus_truck_train.lst", "w")
for i, dt in enumerate(trainloader):
imgs, labels, _, name = dt
img = imgs.numpy()
lb = labels.numpy()
print(name)
print(img.shape)
print(lb.shape)
name = name[0]
img = np.transpose(img[0], (1,2,0))
img += (172.43679809570312, 123.58832550048828, 141.29403686523437)
img = img[:,:,::-1]
img = np.array(img, dtype=np.uint8)
lb = 255 - lb[0] * 255
lb = np.asarray(lb, dtype=np.uint8)
cv2.imshow( "img", img)
cv2.imshow( "lb", lb)
cv2.waitKey(0)
def test_test_leadbang():
dst = LeadBangTest("./leadbang/")
trainloader = data.DataLoader(dst, batch_size=1, num_workers=0)
with open("./list/cityscapes/trainval.lst") as f:
train_list = f.readlines()
train_list = [x.strip() for x in train_list]
f_w = open("./list/cityscapes/bus_truck_train.lst", "w")
for i, dt in enumerate(trainloader):
imgs, labels, _, name = dt
img = imgs.numpy()
lb = labels.numpy()
print(name)
print(img.shape)
print(lb.shape)
name = name[0]
img = np.transpose(img[0], (1,2,0))
img += (172.43679809570312, 123.58832550048828, 141.29403686523437)
img = img[:,:,::-1]
img = np.array(img, dtype=np.uint8)
lb = 255 - lb[0] * 255
lb = np.asarray(lb, dtype=np.uint8)
cv2.imshow( "img", img)
cv2.imshow( "lb", lb)
cv2.waitKey(0)
if __name__ == '__main__':
test_train_leadbang()
# test_test_leadbang()
| [
"torch.utils.data.DataLoader"
] | 0.4.1 | liqile1/OCNet.pytorch | 5fb733adbf178ccc8040197057e3277896b3dc12 |
1.7 | """
Network definitions from https://github.com/ferrine/hyrnn
"""
import geoopt
import geoopt.manifolds.stereographic.math as gmath
import numpy as np
import torch.nn
import torch.nn.functional
from torch.cuda.amp import autocast
def mobius_linear(
input,
weight,
bias=None,
hyperbolic_input=True,
hyperbolic_bias=True,
nonlin=None,
k=-1.0,
):
k = torch.tensor(k)
if hyperbolic_input:
output = mobius_matvec(weight, input, k=k)
else:
output = torch.nn.functional.linear(input, weight)
output = gmath.expmap0(output, k=k)
if bias is not None:
if not hyperbolic_bias:
bias = gmath.expmap0(bias, k=k)
output = gmath.mobius_add(output, bias.unsqueeze(0).expand_as(output), k=k)
if nonlin is not None:
output = gmath.mobius_fn_apply(nonlin, output, k=k)
output = gmath.project(output, k=k)
return output
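# Minimal usage sketch for mobius_linear above (shapes and scales are
# illustrative assumptions, not from the original code):
#   x = torch.randn(4, 8) * 1e-2   # small norms stay well inside the ball
#   w = torch.randn(16, 8) * 1e-2
#   y = mobius_linear(x, w, hyperbolic_input=False, k=-1.0)  # shape (4, 16)
# With hyperbolic_input=False the input is treated as Euclidean, mapped onto
# the Poincare ball with expmap0, then projected back inside the ball.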
def mobius_matvec(m: torch.Tensor, x: torch.Tensor, *, k: torch.Tensor, dim=-1):
return _mobius_matvec(m, x, k, dim=dim)
def _mobius_matvec(m: torch.Tensor, x: torch.Tensor, k: torch.Tensor, dim: int = -1):
if m.dim() > 2 and dim != -1:
raise RuntimeError(
"broadcasted Möbius matvec is supported for the last dim only"
)
x_norm = x.norm(dim=dim, keepdim=True, p=2).clamp_min(1e-15)
if dim != -1 or m.dim() == 2:
# mx = torch.tensordot(x, m, [dim], [1])
mx = torch.matmul(m, x.transpose(1, 0)).transpose(1, 0)
else:
mx = torch.matmul(m, x.unsqueeze(-1)).squeeze(-1)
mx_norm = mx.norm(dim=dim, keepdim=True, p=2).clamp_min(1e-15)
res_c = gmath.tan_k(mx_norm / x_norm * gmath.artan_k(x_norm, k), k) * (mx / mx_norm)
cond = (mx == 0).prod(dim=dim, keepdim=True, dtype=torch.uint8)
res_0 = torch.zeros(1, dtype=res_c.dtype, device=res_c.device)
res = torch.where(cond, res_0, res_c)
return res
def one_rnn_transform(W, h, U, x, b, k):
W_otimes_h = gmath.mobius_matvec(W, h, k=k)
U_otimes_x = gmath.mobius_matvec(U, x, k=k)
Wh_plus_Ux = gmath.mobius_add(W_otimes_h, U_otimes_x, k=k)
return gmath.mobius_add(Wh_plus_Ux, b, k=k)
def mobius_gru_cell(
input: torch.Tensor,
hx: torch.Tensor,
weight_ih: torch.Tensor,
weight_hh: torch.Tensor,
bias: torch.Tensor,
k: torch.Tensor,
nonlin=None,
):
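    # Hyperbolic GRU cell: the update gate z_t and reset gate r_t are computed
    # by mapping the Mobius-affine transforms back to the tangent space at the
    # origin (logmap0) and applying a sigmoid; the candidate state h_tilde and
    # the final interpolation are then formed with Mobius addition and
    # pointwise multiplication on the manifold.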
W_ir, W_ih, W_iz = weight_ih.chunk(3)
b_r, b_h, b_z = bias
W_hr, W_hh, W_hz = weight_hh.chunk(3)
z_t = gmath.logmap0(one_rnn_transform(W_hz, hx, W_iz, input, b_z, k), k=k).sigmoid()
r_t = gmath.logmap0(one_rnn_transform(W_hr, hx, W_ir, input, b_r, k), k=k).sigmoid()
rh_t = gmath.mobius_pointwise_mul(r_t, hx, k=k)
h_tilde = one_rnn_transform(W_hh, rh_t, W_ih, input, b_h, k)
if nonlin is not None:
h_tilde = gmath.mobius_fn_apply(nonlin, h_tilde, k=k)
delta_h = gmath.mobius_add(-hx, h_tilde, k=k)
h_out = gmath.mobius_add(hx, gmath.mobius_pointwise_mul(z_t, delta_h, k=k), k=k)
return h_out
def mobius_gru_loop(
input: torch.Tensor,
h0: torch.Tensor,
weight_ih: torch.Tensor,
weight_hh: torch.Tensor,
bias: torch.Tensor,
k: torch.Tensor,
batch_sizes=None,
hyperbolic_input: bool = False,
hyperbolic_hidden_state0: bool = False,
nonlin=None,
):
if not hyperbolic_hidden_state0:
hx = gmath.expmap0(h0, k=k)
else:
hx = h0
if not hyperbolic_input:
input = gmath.expmap0(input, k=k)
outs = []
if batch_sizes is None:
input_unbinded = input.unbind(0)
for t in range(input.size(0)):
hx = mobius_gru_cell(
input=input_unbinded[t],
hx=hx,
weight_ih=weight_ih,
weight_hh=weight_hh,
bias=bias,
nonlin=nonlin,
k=k,
)
outs.append(hx)
outs = torch.stack(outs)
h_last = hx
else:
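        # Packed-sequence path: batch_sizes is assumed to come from
        # torch.nn.utils.rnn.pack_padded_sequence; each step consumes the first
        # batch_sizes[t] rows, and the hidden states of finished sequences are
        # collected (and finally reversed) to form h_last.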
h_last = []
T = len(batch_sizes) - 1
for i, t in enumerate(range(batch_sizes.size(0))):
ix, input = input[: batch_sizes[t]], input[batch_sizes[t]:]
hx = mobius_gru_cell(
input=ix,
hx=hx,
weight_ih=weight_ih,
weight_hh=weight_hh,
bias=bias,
nonlin=nonlin,
k=k,
)
outs.append(hx)
if t < T:
hx, ht = hx[: batch_sizes[t + 1]], hx[batch_sizes[t + 1]:]
h_last.append(ht)
else:
h_last.append(hx)
h_last.reverse()
h_last = torch.cat(h_last)
outs = torch.cat(outs)
return outs, h_last
class MobiusLinear(torch.nn.Linear):
def __init__(
self,
*args,
hyperbolic_input=True,
hyperbolic_bias=True,
nonlin=None,
k=-1.0,
fp64_hyper=True,
**kwargs
):
k = torch.tensor(k)
super().__init__(*args, **kwargs)
if self.bias is not None:
if hyperbolic_bias:
self.ball = manifold = geoopt.PoincareBall(c=k.abs())
self.bias = geoopt.ManifoldParameter(self.bias, manifold=manifold)
with torch.no_grad():
# self.bias.set_(gmath.expmap0(self.bias.normal_() / 4, k=k))
self.bias.set_(gmath.expmap0(self.bias.normal_() / 400, k=k))
with torch.no_grad():
# 1e-2 was the original value in the code. The updated one is from HNN++
std = 1 / np.sqrt(2 * self.weight.shape[0] * self.weight.shape[1])
# Actually, we divide that by 100 so that it starts really small and far from the border
std = std / 100
self.weight.normal_(std=std)
self.hyperbolic_bias = hyperbolic_bias
self.hyperbolic_input = hyperbolic_input
self.nonlin = nonlin
self.k = k
self.fp64_hyper = fp64_hyper
def forward(self, input):
if self.fp64_hyper:
input = input.double()
else:
input = input.float()
with autocast(enabled=False): # Do not use fp16
return mobius_linear(
input,
weight=self.weight,
bias=self.bias,
hyperbolic_input=self.hyperbolic_input,
nonlin=self.nonlin,
hyperbolic_bias=self.hyperbolic_bias,
k=self.k,
)
def extra_repr(self):
info = super().extra_repr()
info += "c={}, hyperbolic_input={}".format(self.ball.c, self.hyperbolic_input)
if self.bias is not None:
info = ", hyperbolic_bias={}".format(self.hyperbolic_bias)
return info
class MobiusDist2Hyperplane(torch.nn.Module):
def __init__(self, in_features, out_features, k=-1.0, fp64_hyper=True):
k = torch.tensor(k)
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.ball = ball = geoopt.PoincareBall(c=k.abs())
self.sphere = sphere = geoopt.manifolds.Sphere()
self.scale = torch.nn.Parameter(torch.zeros(out_features))
point = torch.randn(out_features, in_features) / 4
point = gmath.expmap0(point, k=k)
tangent = torch.randn(out_features, in_features)
self.point = geoopt.ManifoldParameter(point, manifold=ball)
self.fp64_hyper = fp64_hyper
with torch.no_grad():
self.tangent = geoopt.ManifoldParameter(tangent, manifold=sphere).proj_()
def forward(self, input):
if self.fp64_hyper:
input = input.double()
else:
input = input.float()
with autocast(enabled=False): # Do not use fp16
input = input.unsqueeze(-2)
distance = gmath.dist2plane(
x=input, p=self.point, a=self.tangent, k=self.ball.c, signed=True
)
return distance * self.scale.exp()
def extra_repr(self):
return (
"in_features={in_features}, out_features={out_features}"
# "c={ball.c}".format(
# **self.__dict__
# )
)
| [
"torch.cuda.amp.autocast"
] | 1.7.0 | jacv050/hyperfuture | 54288230656c7a8cc0b825f9e397d690408d9e42 |
0.4 | """
SparseNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
"""
__all__ = ['SparseNet', 'sparsenet121', 'sparsenet161', 'sparsenet169', 'sparsenet201', 'sparsenet264']
import os
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import pre_conv1x1_block, pre_conv3x3_block
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
def sparsenet_exponential_fetch(lst):
"""
SparseNet's specific exponential fetch.
Parameters:
----------
lst : list
List of something.
Returns
-------
list
Filtered list.
"""
return [lst[len(lst) - 2**i] for i in range(1 + math.floor(math.log(len(lst), 2)))]
class SparseBlock(nn.Module):
"""
SparseNet block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
"""
def __init__(self,
in_channels,
out_channels,
dropout_rate):
super(SparseBlock, self).__init__()
self.use_dropout = (dropout_rate != 0.0)
bn_size = 4
mid_channels = out_channels * bn_size
self.conv1 = pre_conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels)
if self.use_dropout:
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
if self.use_dropout:
x = self.dropout(x)
return x
class SparseStage(nn.Module):
"""
SparseNet stage.
Parameters:
----------
in_channels : int
Number of input channels.
channels_per_stage : list of int
Number of output channels for each unit in stage.
growth_rate : int
Growth rate for blocks.
dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
do_transition : bool
Whether use transition block.
"""
def __init__(self,
in_channels,
channels_per_stage,
growth_rate,
dropout_rate,
do_transition):
super(SparseStage, self).__init__()
self.do_transition = do_transition
if self.do_transition:
self.trans = TransitionBlock(
in_channels=in_channels,
out_channels=(in_channels // 2))
in_channels = in_channels // 2
self.blocks = nn.Sequential()
for i, out_channels in enumerate(channels_per_stage):
self.blocks.add_module("block{}".format(i + 1), SparseBlock(
in_channels=in_channels,
out_channels=growth_rate,
dropout_rate=dropout_rate))
in_channels = out_channels
def forward(self, x):
if self.do_transition:
x = self.trans(x)
outs = [x]
for block in self.blocks._modules.values():
y = block(x)
outs.append(y)
flt_outs = sparsenet_exponential_fetch(outs)
x = torch.cat(tuple(flt_outs), dim=1)
return x
class SparseNet(nn.Module):
"""
SparseNet model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
growth_rate : int
Growth rate for blocks.
dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
growth_rate,
dropout_rate=0.0,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(SparseNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", PreResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = SparseStage(
in_channels=in_channels,
channels_per_stage=channels_per_stage,
growth_rate=growth_rate,
dropout_rate=dropout_rate,
do_transition=(i != 0))
in_channels = channels_per_stage[-1]
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_sparsenet(num_layers,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create SparseNet model with specific parameters.
Parameters:
----------
num_layers : int
Number of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if num_layers == 121:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 24, 16]
elif num_layers == 161:
init_block_channels = 96
growth_rate = 48
layers = [6, 12, 36, 24]
elif num_layers == 169:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 32, 32]
elif num_layers == 201:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 48, 32]
elif num_layers == 264:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 64, 48]
else:
raise ValueError("Unsupported SparseNet version with number of layers {}".format(num_layers))
from functools import reduce
channels = reduce(
lambda xi, yi: xi + [reduce(
lambda xj, yj: xj + [sum(sparsenet_exponential_fetch([xj[0]] + [yj[0]] * (yj[1] + 1)))],
zip([growth_rate] * yi, range(yi)),
[xi[-1][-1] // 2])[1:]],
layers,
[[init_block_channels * 2]])[1:]
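    # The nested reduce above precomputes, for every stage, the channel count
    # after each SparseBlock: entry j is the sum over the exponentially fetched
    # subset of [stage_input_channels] + [growth_rate] * (j + 1), mirroring the
    # concatenation performed in SparseStage.forward.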
net = SparseNet(
channels=channels,
init_block_channels=init_block_channels,
growth_rate=growth_rate,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def sparsenet121(**kwargs):
"""
SparseNet-121 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sparsenet(num_layers=121, model_name="sparsenet121", **kwargs)
def sparsenet161(**kwargs):
"""
SparseNet-161 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sparsenet(num_layers=161, model_name="sparsenet161", **kwargs)
def sparsenet169(**kwargs):
"""
SparseNet-169 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sparsenet(num_layers=169, model_name="sparsenet169", **kwargs)
def sparsenet201(**kwargs):
"""
SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs)
def sparsenet264(**kwargs):
"""
SparseNet-264 model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sparsenet(num_layers=264, model_name="sparsenet264", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
sparsenet121,
sparsenet161,
sparsenet169,
sparsenet201,
sparsenet264,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sparsenet121 or weight_count == 3250824)
assert (model != sparsenet161 or weight_count == 9853288)
assert (model != sparsenet169 or weight_count == 4709864)
assert (model != sparsenet201 or weight_count == 5703144)
assert (model != sparsenet264 or weight_count == 7717224)
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.init.kaiming_uniform_",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.randn"
] | 0.4.0 | oliviaweng/imgclsmob | a1f1f52eecbb841fa878bff4d3c311b79864835d |
1.7 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
add_checkpoint_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
gen_parser_from_dataclass(group, CommonEvalConfig())
return parser
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
if args.arch in ARCH_MODEL_REGISTRY:
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
elif args.arch in MODEL_REGISTRY:
MODEL_REGISTRY[args.arch].add_args(model_specific_group)
else:
raise RuntimeError()
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
elif hasattr(cls, "__dataclass"):
gen_parser_from_dataclass(parser, cls.__dataclass())
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if (
hasattr(args, "batch_size_valid") and args.batch_size_valid is None
) or not hasattr(args, "batch_size_valid"):
args.batch_size_valid = args.batch_size
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
if args.bf16:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
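# Minimal usage sketch for parse_args_and_arch above (the argument list is an
# assumption and depends on the tasks/architectures registered at runtime):
#   parser = get_training_parser()
#   args = parse_args_and_arch(parser, input_args=[
#       "data-bin/my_dataset", "--arch", "transformer", "--optimizer", "adam",
#   ])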
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonConfig())
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix (also used to build dictionaries)")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("dataset_data_loading")
gen_parser_from_dataclass(group, DatasetConfig())
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("distributed_training")
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
gen_parser_from_dataclass(
group, DistributedTrainingConfig(distributed_world_size=default_world_size)
)
return group
def add_optimization_args(parser):
group = parser.add_argument_group("optimization")
# fmt: off
gen_parser_from_dataclass(group, OptimizationConfig())
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("checkpoint")
# fmt: off
gen_parser_from_dataclass(group, CheckpointConfig())
# fmt: on
return group
def add_common_eval_args(group):
gen_parser_from_dataclass(group, CommonEvalConfig())
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMConfig())
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, GenerationConfig())
return group
def add_interactive_args(parser):
group = parser.add_argument_group("Interactive")
gen_parser_from_dataclass(group, InteractiveConfig())
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from fairseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture')
# fmt: on
return group
| [
"torch.cuda.device_count"
] | 1.7.1 | jaehwlee/K-wav2vec | 6ba33f0ef7d2399e4c52a3c80d83a092dac4daa9 |
1.4 | import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from tool.region_loss import RegionLoss
from tool.yolo_layer import YoloLayer
from tool.config import *
from tool.torch_utils import *
class Mish(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x = x * (torch.tanh(torch.nn.functional.softplus(x)))
return x
class MaxPoolDark(nn.Module):
def __init__(self, size=2, stride=1):
super(MaxPoolDark, self).__init__()
self.size = size
self.stride = stride
def forward(self, x):
'''
darknet output_size = (input_size + p - k) / s +1
p : padding = k - 1
k : size
s : stride
torch output_size = (input_size + 2*p -k) / s +1
p : padding = k//2
'''
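        # Worked example (illustrative): size=3, stride=1, input width 13 gives
        # the darknet output (13 + 2 - 3) // 1 + 1 = 13; the code below splits
        # the total pad of size-1 between the two sides (asymmetrically when
        # needed) and uses replicate padding so max_pool2d matches that size.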
p = self.size // 2
if ((x.shape[2] - 1) // self.stride) != ((x.shape[2] + 2 * p - self.size) // self.stride):
padding1 = (self.size - 1) // 2
padding2 = padding1 + 1
else:
padding1 = (self.size - 1) // 2
padding2 = padding1
if ((x.shape[3] - 1) // self.stride) != ((x.shape[3] + 2 * p - self.size) // self.stride):
padding3 = (self.size - 1) // 2
padding4 = padding3 + 1
else:
padding3 = (self.size - 1) // 2
padding4 = padding3
x = F.max_pool2d(F.pad(x, (padding3, padding4, padding1, padding2), mode='replicate'),
self.size, stride=self.stride)
return x
class Upsample_expand(nn.Module):
def __init__(self, stride=2):
super(Upsample_expand, self).__init__()
self.stride = stride
def forward(self, x):
assert (x.data.dim() == 4)
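        # Nearest-neighbour upsampling by an integer factor via expand: e.g.
        # (B, C, H, W) with stride=2 becomes (B, C, 2H, 2W), each value
        # replicated into a 2x2 block.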
x = x.view(x.size(0), x.size(1), x.size(2), 1, x.size(3), 1).\
expand(x.size(0), x.size(1), x.size(2), self.stride, x.size(3), self.stride).contiguous().\
view(x.size(0), x.size(1), x.size(2) * self.stride, x.size(3) * self.stride)
return x
class Upsample_interpolate(nn.Module):
def __init__(self, stride):
super(Upsample_interpolate, self).__init__()
self.stride = stride
def forward(self, x):
assert (x.data.dim() == 4)
out = F.interpolate(x, size=(x.size(2) * self.stride, x.size(3) * self.stride), mode='nearest')
return out
class Reorg(nn.Module):
def __init__(self, stride=2):
super(Reorg, self).__init__()
self.stride = stride
def forward(self, x):
stride = self.stride
assert (x.data.dim() == 4)
B = x.data.size(0)
C = x.data.size(1)
H = x.data.size(2)
W = x.data.size(3)
assert (H % stride == 0)
assert (W % stride == 0)
ws = stride
hs = stride
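        # Space-to-depth: with stride=2, (B, C, H, W) is rearranged into
        # (B, 4*C, H//2, W//2); each 2x2 spatial block becomes four channels.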
        x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(3, 4).contiguous()
        x = x.view(B, C, (H // hs) * (W // ws), hs * ws).transpose(2, 3).contiguous()
        x = x.view(B, C, hs * ws, H // hs, W // ws).transpose(1, 2).contiguous()
        x = x.view(B, hs * ws * C, H // hs, W // ws)
return x
class GlobalAvgPool2d(nn.Module):
def __init__(self):
super(GlobalAvgPool2d, self).__init__()
def forward(self, x):
N = x.data.size(0)
C = x.data.size(1)
H = x.data.size(2)
W = x.data.size(3)
x = F.avg_pool2d(x, (H, W))
x = x.view(N, C)
return x
# Placeholder for route, shortcut and sam layers: their logic lives in
# Darknet.forward(), so this module only keeps the ModuleList indices aligned.
class EmptyModule(nn.Module):
def __init__(self):
super(EmptyModule, self).__init__()
def forward(self, x):
return x
# support route shortcut and reorg
class Darknet(nn.Module):
def __init__(self, cfgfile, inference=False):
super(Darknet, self).__init__()
self.inference = inference
self.training = not self.inference
self.blocks = parse_cfg(cfgfile)
self.width = int(self.blocks[0]['width'])
self.height = int(self.blocks[0]['height'])
self.models = self.create_network(self.blocks) # merge conv, bn,leaky
self.loss = self.models[len(self.models) - 1]
if self.blocks[(len(self.blocks) - 1)]['type'] == 'region':
self.anchors = self.loss.anchors
self.num_anchors = self.loss.num_anchors
self.anchor_step = self.loss.anchor_step
self.num_classes = self.loss.num_classes
self.header = torch.IntTensor([0, 0, 0, 0])
self.seen = 0
def forward(self, x):
ind = -2
self.loss = None
outputs = dict()
out_boxes = []
for block in self.blocks:
ind = ind + 1
# if ind > 0:
# return x
if block['type'] == 'net':
continue
elif block['type'] in ['convolutional', 'maxpool', 'reorg', 'upsample', 'avgpool', 'softmax', 'connected']:
x = self.models[ind](x)
outputs[ind] = x
elif block['type'] == 'route':
layers = block['layers'].split(',')
layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
if len(layers) == 1:
if 'groups' not in block.keys() or int(block['groups']) == 1:
x = outputs[layers[0]]
outputs[ind] = x
else:
groups = int(block['groups'])
group_id = int(block['group_id'])
_, b, _, _ = outputs[layers[0]].shape
x = outputs[layers[0]][:, b // groups * group_id:b // groups * (group_id + 1)]
outputs[ind] = x
elif len(layers) == 2:
x1 = outputs[layers[0]]
x2 = outputs[layers[1]]
x = torch.cat((x1, x2), 1)
outputs[ind] = x
elif len(layers) == 4:
x1 = outputs[layers[0]]
x2 = outputs[layers[1]]
x3 = outputs[layers[2]]
x4 = outputs[layers[3]]
x = torch.cat((x1, x2, x3, x4), 1)
outputs[ind] = x
else:
print("rounte number > 2 ,is {}".format(len(layers)))
elif block['type'] == 'shortcut':
from_layer = int(block['from'])
activation = block['activation']
from_layer = from_layer if from_layer > 0 else from_layer + ind
x1 = outputs[from_layer]
x2 = outputs[ind - 1]
x = x1 + x2
if activation == 'leaky':
x = F.leaky_relu(x, 0.1, inplace=True)
elif activation == 'relu':
x = F.relu(x, inplace=True)
outputs[ind] = x
elif block['type'] == 'sam':
from_layer = int(block['from'])
from_layer = from_layer if from_layer > 0 else from_layer + ind
x1 = outputs[from_layer]
x2 = outputs[ind - 1]
x = x1 * x2
outputs[ind] = x
elif block['type'] == 'region':
continue
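                # NOTE: the continue above makes the region-loss code below
                # unreachable; region layers are effectively skipped here.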
if self.loss:
self.loss = self.loss + self.models[ind](x)
else:
self.loss = self.models[ind](x)
outputs[ind] = None
elif block['type'] == 'yolo':
# if self.training:
# pass
# else:
# boxes = self.models[ind](x)
# out_boxes.append(boxes)
boxes = self.models[ind](x)
out_boxes.append(boxes)
elif block['type'] == 'cost':
continue
else:
print('unknown type %s' % (block['type']))
if self.training:
return out_boxes
else:
return get_region_boxes(out_boxes)
def print_network(self):
print_cfg(self.blocks)
def create_network(self, blocks):
models = nn.ModuleList()
prev_filters = 3
out_filters = []
prev_stride = 1
out_strides = []
conv_id = 0
for block in blocks:
if block['type'] == 'net':
prev_filters = int(block['channels'])
continue
elif block['type'] == 'convolutional':
conv_id = conv_id + 1
batch_normalize = int(block['batch_normalize'])
filters = int(block['filters'])
kernel_size = int(block['size'])
stride = int(block['stride'])
is_pad = int(block['pad'])
pad = (kernel_size - 1) // 2 if is_pad else 0
activation = block['activation']
model = nn.Sequential()
if batch_normalize:
model.add_module('conv{0}'.format(conv_id),
nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))
model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))
# model.add_module('bn{0}'.format(conv_id), BN2d(filters))
else:
model.add_module('conv{0}'.format(conv_id),
nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
if activation == 'leaky':
model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))
elif activation == 'relu':
model.add_module('relu{0}'.format(conv_id), nn.ReLU(inplace=True))
elif activation == 'mish':
model.add_module('mish{0}'.format(conv_id), Mish())
elif activation == 'linear':
pass
elif activation == 'logistic':
model.add_module('sigmoid{0}'.format(conv_id), nn.Sigmoid())
else:
print("No convolutional activation named {}".format(activation))
prev_filters = filters
out_filters.append(prev_filters)
prev_stride = stride * prev_stride
out_strides.append(prev_stride)
models.append(model)
elif block['type'] == 'maxpool':
pool_size = int(block['size'])
stride = int(block['stride'])
if stride == 1 and pool_size % 2:
                    # You could use MaxPoolDark here instead; plain nn.MaxPool2d is
                    # more convenient for ONNX conversion.
                    # Example: [maxpool] size=3 stride=1
model = nn.MaxPool2d(kernel_size=pool_size, stride=stride, padding=pool_size // 2)
elif stride == pool_size:
                    # You could use MaxPoolDark here instead; plain nn.MaxPool2d is
                    # more convenient for ONNX conversion.
                    # Example: [maxpool] size=2 stride=2
model = nn.MaxPool2d(kernel_size=pool_size, stride=stride, padding=0)
else:
model = MaxPoolDark(pool_size, stride)
out_filters.append(prev_filters)
prev_stride = stride * prev_stride
out_strides.append(prev_stride)
models.append(model)
elif block['type'] == 'avgpool':
model = GlobalAvgPool2d()
out_filters.append(prev_filters)
models.append(model)
elif block['type'] == 'softmax':
model = nn.Softmax()
out_strides.append(prev_stride)
out_filters.append(prev_filters)
models.append(model)
elif block['type'] == 'cost':
if block['_type'] == 'sse':
model = nn.MSELoss(reduction='mean')
elif block['_type'] == 'L1':
model = nn.L1Loss(reduction='mean')
elif block['_type'] == 'smooth':
model = nn.SmoothL1Loss(reduction='mean')
out_filters.append(1)
out_strides.append(prev_stride)
models.append(model)
elif block['type'] == 'reorg':
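                # reorg folds each stride x stride spatial block into channels,
                # so the channel count grows by stride**2 while the effective
                # stride grows by stride.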
stride = int(block['stride'])
prev_filters = stride * stride * prev_filters
out_filters.append(prev_filters)
prev_stride = prev_stride * stride
out_strides.append(prev_stride)
models.append(Reorg(stride))
elif block['type'] == 'upsample':
stride = int(block['stride'])
out_filters.append(prev_filters)
prev_stride = prev_stride // stride
out_strides.append(prev_stride)
models.append(Upsample_expand(stride))
# models.append(Upsample_interpolate(stride))
elif block['type'] == 'route':
layers = block['layers'].split(',')
ind = len(models)
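                # negative layer indices in the cfg are relative to the
                # current layer, as in darknet; resolve them to absolute
                # indices here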
layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
if len(layers) == 1:
if 'groups' not in block.keys() or int(block['groups']) == 1:
prev_filters = out_filters[layers[0]]
prev_stride = out_strides[layers[0]]
else:
prev_filters = out_filters[layers[0]] // int(block['groups'])
prev_stride = out_strides[layers[0]] // int(block['groups'])
elif len(layers) == 2:
assert (layers[0] == ind - 1 or layers[1] == ind - 1)
prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
prev_stride = out_strides[layers[0]]
elif len(layers) == 4:
assert (layers[0] == ind - 1)
prev_filters = out_filters[layers[0]] + out_filters[layers[1]] + out_filters[layers[2]] + \
out_filters[layers[3]]
prev_stride = out_strides[layers[0]]
else:
print("route error!!!")
out_filters.append(prev_filters)
out_strides.append(prev_stride)
models.append(EmptyModule())
elif block['type'] == 'shortcut':
ind = len(models)
prev_filters = out_filters[ind - 1]
out_filters.append(prev_filters)
prev_stride = out_strides[ind - 1]
out_strides.append(prev_stride)
models.append(EmptyModule())
elif block['type'] == 'sam':
ind = len(models)
prev_filters = out_filters[ind - 1]
out_filters.append(prev_filters)
prev_stride = out_strides[ind - 1]
out_strides.append(prev_stride)
models.append(EmptyModule())
elif block['type'] == 'connected':
filters = int(block['output'])
if block['activation'] == 'linear':
model = nn.Linear(prev_filters, filters)
elif block['activation'] == 'leaky':
model = nn.Sequential(
nn.Linear(prev_filters, filters),
nn.LeakyReLU(0.1, inplace=True))
elif block['activation'] == 'relu':
model = nn.Sequential(
nn.Linear(prev_filters, filters),
nn.ReLU(inplace=True))
prev_filters = filters
out_filters.append(prev_filters)
out_strides.append(prev_stride)
models.append(model)
elif block['type'] == 'region':
loss = RegionLoss()
anchors = block['anchors'].split(',')
loss.anchors = [float(i) for i in anchors]
loss.num_classes = int(block['classes'])
loss.num_anchors = int(block['num'])
loss.anchor_step = len(loss.anchors) // loss.num_anchors
loss.object_scale = float(block['object_scale'])
loss.noobject_scale = float(block['noobject_scale'])
loss.class_scale = float(block['class_scale'])
loss.coord_scale = float(block['coord_scale'])
out_filters.append(prev_filters)
out_strides.append(prev_stride)
models.append(loss)
elif block['type'] == 'yolo':
yolo_layer = YoloLayer()
anchors = block['anchors'].split(',')
anchor_mask = block['mask'].split(',')
yolo_layer.anchor_mask = [int(i) for i in anchor_mask]
yolo_layer.anchors = [float(i) for i in anchors]
yolo_layer.num_classes = int(block['classes'])
self.num_classes = yolo_layer.num_classes
yolo_layer.num_anchors = int(block['num'])
yolo_layer.anchor_step = len(yolo_layer.anchors) // yolo_layer.num_anchors
yolo_layer.stride = prev_stride
yolo_layer.scale_x_y = float(block['scale_x_y'])
# yolo_layer.object_scale = float(block['object_scale'])
# yolo_layer.noobject_scale = float(block['noobject_scale'])
# yolo_layer.class_scale = float(block['class_scale'])
# yolo_layer.coord_scale = float(block['coord_scale'])
out_filters.append(prev_filters)
out_strides.append(prev_stride)
models.append(yolo_layer)
else:
print('unknown type %s' % (block['type']))
return models
def load_weights(self, weightfile):
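        # Darknet weight files begin with a small integer header (version
        # fields plus a "seen images" counter; recent formats occupy 5 int32
        # slots, some older ones only 4), followed by every layer's weights
        # stored as raw float32 values.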
        with open(weightfile, 'rb') as fp:
            header = np.fromfile(fp, count=5, dtype=np.int32)
            self.header = torch.from_numpy(header)
            self.seen = self.header[3]
            buf = np.fromfile(fp, dtype=np.float32)
start = 0
ind = -2
for block in self.blocks:
if start >= buf.size:
break
ind = ind + 1
if block['type'] == 'net':
continue
elif block['type'] == 'convolutional':
model = self.models[ind]
batch_normalize = int(block['batch_normalize'])
if batch_normalize:
start = load_conv_bn(buf, start, model[0], model[1])
else:
start = load_conv(buf, start, model[0])
elif block['type'] == 'connected':
model = self.models[ind]
if block['activation'] != 'linear':
start = load_fc(buf, start, model[0])
else:
start = load_fc(buf, start, model)
elif block['type'] == 'maxpool':
pass
elif block['type'] == 'reorg':
pass
elif block['type'] == 'upsample':
pass
elif block['type'] == 'route':
pass
elif block['type'] == 'shortcut':
pass
elif block['type'] == 'sam':
pass
elif block['type'] == 'region':
pass
elif block['type'] == 'yolo':
pass
elif block['type'] == 'avgpool':
pass
elif block['type'] == 'softmax':
pass
elif block['type'] == 'cost':
pass
else:
print('unknown type %s' % (block['type']))
# def save_weights(self, outfile, cutoff=0):
# if cutoff <= 0:
# cutoff = len(self.blocks) - 1
#
# fp = open(outfile, 'wb')
# self.header[3] = self.seen
# header = self.header
# header.numpy().tofile(fp)
#
# ind = -1
# for blockId in range(1, cutoff + 1):
# ind = ind + 1
# block = self.blocks[blockId]
# if block['type'] == 'convolutional':
# model = self.models[ind]
# batch_normalize = int(block['batch_normalize'])
# if batch_normalize:
# save_conv_bn(fp, model[0], model[1])
# else:
# save_conv(fp, model[0])
# elif block['type'] == 'connected':
# model = self.models[ind]
# if block['activation'] != 'linear':
    #             save_fc(fp, model[0])
    #         else:
    #             save_fc(fp, model)
# elif block['type'] == 'maxpool':
# pass
# elif block['type'] == 'reorg':
# pass
# elif block['type'] == 'upsample':
# pass
# elif block['type'] == 'route':
# pass
# elif block['type'] == 'shortcut':
# pass
# elif block['type'] == 'sam':
# pass
# elif block['type'] == 'region':
# pass
# elif block['type'] == 'yolo':
# pass
# elif block['type'] == 'avgpool':
# pass
# elif block['type'] == 'softmax':
# pass
# elif block['type'] == 'cost':
# pass
# else:
# print('unknown type %s' % (block['type']))
# fp.close()
| [
"torch.nn.Linear",
"torch.nn.functional.avg_pool2d",
"torch.nn.MSELoss",
"torch.nn.ModuleList",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.Softmax",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.Sigmoid",
"torch.nn.functional.relu",
"torch.nn.ReLU",
"torch.nn.L1Loss",
"torch.nn.Conv2d",
"torch.nn.SmoothL1Loss",
"torch.nn.functional.pad",
"torch.nn.functional.leaky_relu"
] | 1.4.0 | ajsanjoaquin/pytorch-YOLOv4 | dbc10cdc43668f29647ea2019ec13c4109d590c1 |
1.7 | #!/usr/bin/env python3
import unittest
import torch
from gpytorch.lazy import CholLazyTensor, TriangularLazyTensor
from gpytorch.test.lazy_tensor_test_case import LazyTensorTestCase
class TestCholLazyTensor(LazyTensorTestCase, unittest.TestCase):
seed = 0
should_test_sample = True
should_call_cg = False
should_call_lanczos = False
def create_lazy_tensor(self):
chol = torch.tensor(
[[3, 0, 0, 0, 0], [-1, 2, 0, 0, 0], [1, 4, 1, 0, 0], [0, 2, 3, 2, 0], [-4, -2, 1, 3, 4]],
dtype=torch.float,
requires_grad=True,
)
return CholLazyTensor(TriangularLazyTensor(chol))
def evaluate_lazy_tensor(self, lazy_tensor):
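        # the dense equivalent of CholLazyTensor(L) is L @ L.T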
chol = lazy_tensor.root.evaluate()
return chol.matmul(chol.transpose(-1, -2))
class TestCholLazyTensorBatch(TestCholLazyTensor):
seed = 0
def create_lazy_tensor(self):
chol = torch.tensor(
[
[[3, 0, 0, 0, 0], [-1, 2, 0, 0, 0], [1, 4, 1, 0, 0], [0, 2, 3, 2, 0], [-4, -2, 1, 3, 4]],
[[2, 0, 0, 0, 0], [3, 1, 0, 0, 0], [-2, 3, 2, 0, 0], [-2, 1, -1, 3, 0], [-4, -4, 5, 2, 3]],
],
dtype=torch.float,
)
chol.add_(torch.eye(5).unsqueeze(0))
chol.requires_grad_(True)
return CholLazyTensor(TriangularLazyTensor(chol))
class TestCholLazyTensorMultiBatch(TestCholLazyTensor):
seed = 0
    # Because these LTs are large, we'll skip the big tests
should_test_sample = False
skip_slq_tests = True
def create_lazy_tensor(self):
chol = torch.tensor(
[
[[3, 0, 0, 0, 0], [-1, 2, 0, 0, 0], [1, 4, 1, 0, 0], [0, 2, 3, 2, 0], [-4, -2, 1, 3, 4]],
[[2, 0, 0, 0, 0], [3, 1, 0, 0, 0], [-2, 3, 2, 0, 0], [-2, 1, -1, 3, 0], [-4, -4, 5, 2, 3]],
],
dtype=torch.float,
)
chol = chol.repeat(3, 1, 1, 1)
chol[1].mul_(2)
chol[2].mul_(0.5)
chol.add_(torch.eye(5).unsqueeze_(0).unsqueeze_(0))
chol.requires_grad_(True)
return CholLazyTensor(TriangularLazyTensor(chol))
if __name__ == "__main__":
unittest.main()
| [
"torch.eye",
"torch.tensor"
] | 1.7 | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf |
1.7 | #!/usr/bin/env python3
import math
import unittest
import torch
from gpytorch.kernels import CosineKernel
class TestCosineKernel(unittest.TestCase):
def test_computes_periodic_function(self):
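        # CosineKernel computes k(x, x') = cos(pi * ||x - x'|| / period);
        # the loops below build this reference value elementwise.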
a = torch.tensor([[4, 1], [2, 2], [8, 0]], dtype=torch.float)
b = torch.tensor([[0, 0], [2, 1], [1, 0]], dtype=torch.float)
period = 1
kernel = CosineKernel().initialize(period_length=period)
kernel.eval()
actual = torch.zeros(3, 3)
for i in range(3):
for j in range(3):
actual[i, j] = torch.cos(math.pi * ((a[i] - b[j]) / period).norm(2, dim=-1))
res = kernel(a, b).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
# diag
res = kernel(a, b).diag()
actual = actual.diag()
self.assertLess(torch.norm(res - actual), 1e-5)
# batch_dims
actual = torch.zeros(2, 3, 3)
for i in range(3):
for j in range(3):
for l in range(2):
actual[l, i, j] = torch.cos(math.pi * ((a[i, l] - b[j, l]) / period))
res = kernel(a, b, last_dim_is_batch=True).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
# batch_dims + diag
res = kernel(a, b, last_dim_is_batch=True).diag()
actual = torch.cat([actual[i].diag().unsqueeze(0) for i in range(actual.size(0))])
self.assertLess(torch.norm(res - actual), 1e-5)
def test_batch(self):
a = torch.tensor([[4, 2, 8], [1, 2, 3]], dtype=torch.float).view(2, 3, 1)
b = torch.tensor([[0, 2, 1], [-1, 2, 0]], dtype=torch.float).view(2, 3, 1)
period = torch.tensor(1, dtype=torch.float).view(1, 1, 1)
kernel = CosineKernel().initialize(period_length=period)
kernel.eval()
actual = torch.zeros(2, 3, 3)
for k in range(2):
for i in range(3):
for j in range(3):
actual[k, i, j] = torch.cos(math.pi * ((a[k, i] - b[k, j]) / period))
res = kernel(a, b).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
def test_batch_separate(self):
a = torch.tensor([[[4, 1], [2, 2], [8, 0]], [[2, 5], [6, 1], [0, 1]]], dtype=torch.float)
b = torch.tensor([[[0, 0], [2, 1], [1, 0]], [[1, 1], [2, 3], [1, 0]]], dtype=torch.float)
period = torch.tensor([1, 2], dtype=torch.float).view(2, 1, 1)
kernel = CosineKernel(batch_shape=torch.Size([2])).initialize(period_length=period)
kernel.eval()
actual = torch.zeros(2, 3, 3)
for k in range(2):
for i in range(3):
for j in range(3):
actual[k, i, j] = torch.cos(math.pi * ((a[k, i] - b[k, j]) / period[k]).norm(2, dim=-1))
res = kernel(a, b).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
# diag
res = kernel(a, b).diag()
actual = torch.cat([actual[i].diag().unsqueeze(0) for i in range(actual.size(0))])
self.assertLess(torch.norm(res - actual), 1e-5)
# batch_dims
actual = torch.zeros(2, 2, 3, 3)
for k in range(2):
for i in range(3):
for j in range(3):
for l in range(2):
actual[k, l, i, j] = torch.cos(math.pi * ((a[k, i, l] - b[k, j, l]) / period[k]))
res = kernel(a, b, last_dim_is_batch=True).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
# batch_dims + diag
res = kernel(a, b, last_dim_is_batch=True).diag()
actual = actual.diagonal(dim1=-2, dim2=-1)
self.assertLess(torch.norm(res - actual), 1e-5)
if __name__ == "__main__":
unittest.main()
| [
"torch.zeros",
"torch.cos",
"torch.Size",
"torch.norm",
"torch.tensor"
] | 1.7 | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf |
1.7 | #!/usr/bin/env python3
import torch
from ..utils.broadcasting import _pad_with_singletons
from ..utils.getitem import _equal_indices, _noop_index
from ..utils.memoize import cached
from .lazy_tensor import LazyTensor
from .matmul_lazy_tensor import MatmulLazyTensor
from .non_lazy_tensor import NonLazyTensor, lazify
class RootLazyTensor(LazyTensor):
def __init__(self, root):
root = lazify(root)
super().__init__(root)
self.root = root
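        # A RootLazyTensor lazily represents the PSD matrix R @ R.transpose(-1, -2)
        # with R = self.root, so entry (i, j) is the inner product of rows i and j of R.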
def _expand_batch(self, batch_shape):
if len(batch_shape) == 0:
return self
return self.__class__(self.root._expand_batch(batch_shape))
def _get_indices(self, row_index, col_index, *batch_indices):
row_index = row_index.unsqueeze(-1)
col_index = col_index.unsqueeze(-1)
batch_indices = tuple(batch_index.unsqueeze(-1) for batch_index in batch_indices)
inner_index = torch.arange(0, self.root.size(-1), device=self.device)
inner_index = _pad_with_singletons(inner_index, row_index.dim() - 1, 0)
left_tensor = self.root._get_indices(row_index, inner_index, *batch_indices)
if torch.equal(row_index, col_index):
res = left_tensor.pow(2).sum(-1)
else:
right_tensor = self.root._get_indices(col_index, inner_index, *batch_indices)
res = (left_tensor * right_tensor).sum(-1)
return res
def _getitem(self, row_index, col_index, *batch_indices):
# Make sure we're not generating more memory with our "efficient" method
if torch.is_tensor(row_index) and torch.is_tensor(col_index):
num_indices = row_index.numel()
if num_indices > self.matrix_shape.numel():
return lazify(self.evaluate())._getitem(row_index, col_index, *batch_indices)
left_tensor = self.root._getitem(row_index, _noop_index, *batch_indices)
if _equal_indices(row_index, col_index):
res = self.__class__(left_tensor)
else:
right_tensor = self.root._getitem(col_index, _noop_index, *batch_indices)
res = MatmulLazyTensor(left_tensor, right_tensor.transpose(-1, -2))
return res
def _matmul(self, rhs):
return self.root._matmul(self.root._t_matmul(rhs))
def _mul_constant(self, constant):
if constant > 0:
res = self.__class__(self.root._mul_constant(constant.sqrt()))
else:
res = super()._mul_constant(constant)
return res
def _t_matmul(self, rhs):
# Matrix is symmetric
return self._matmul(rhs)
def root_decomposition(self):
return self
def _root_decomposition(self):
return self.root
def _root_decomposition_size(self):
return self.root.size(-1)
def _size(self):
return torch.Size((*self.root.batch_shape, self.root.size(-2), self.root.size(-2)))
def _transpose_nonbatch(self):
return self
def diag(self):
if isinstance(self.root, NonLazyTensor):
return (self.root.tensor ** 2).sum(-1)
else:
return super().diag()
@cached
def evaluate(self):
eval_root = self.root.evaluate()
return torch.matmul(eval_root, eval_root.transpose(-1, -2))
| [
"torch.is_tensor",
"torch.equal"
] | 1.7 | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf |
1.3 | #!/usr/bin/env python3
import math
import warnings
import torch
from .. import settings
from ..distributions import MultivariateNormal
from ..lazy import (
BatchRepeatLazyTensor,
CachedCGLazyTensor,
CholLazyTensor,
DiagLazyTensor,
MatmulLazyTensor,
PsdSumLazyTensor,
RootLazyTensor,
)
from ..module import Module
from ..utils.memoize import cached
from .unwhitened_variational_strategy import UnwhitenedVariationalStrategy
# Remove after 1.0
class WhitenedVariationalStrategy(UnwhitenedVariationalStrategy):
def __init__(self, model, inducing_points, variational_distribution, learn_inducing_locations=True):
warnings.warn(
"WhitenedVariationalStrategy is deprecated. Please use VariationalStrategy instead.", DeprecationWarning
)
super().__init__(model, inducing_points, variational_distribution, learn_inducing_locations)
@cached(name="logdet_memo")
def prior_covar_logdet(self):
return -self.prior_distribution.lazy_covariance_matrix.logdet()
@cached(name="covar_trace_memo")
def covar_trace(self):
variational_covar = self.variational_distribution.covariance_matrix
prior_covar = self.prior_distribution.covariance_matrix
batch_shape = prior_covar.shape[:-2]
return (variational_covar * prior_covar).view(*batch_shape, -1).sum(-1)
@cached(name="mean_diff_inv_quad_memo")
def mean_diff_inv_quad(self):
prior_mean = self.prior_distribution.mean
prior_covar = self.prior_distribution.lazy_covariance_matrix
variational_mean = self.variational_distribution.mean
return prior_covar.inv_quad(variational_mean - prior_mean)
def kl_divergence(self):
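        # Standard Gaussian KL between q(u) = N(m, S) and p(u) = N(mu, K):
        # KL = 0.5 * (log|K| - log|S| + tr(K^-1 S) + (m - mu)^T K^-1 (m - mu) - d),
        # specialized below using the whitened parameterization.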
variational_dist_u = self.variational_distribution
prior_dist = self.prior_distribution
kl_divergence = 0.5 * sum(
[
                # log|K| - log|S|
                # = log|K| - log|K var_dist_covar K|
                # = -log|K| - log|var_dist_covar|
                self.prior_covar_logdet(),
                -variational_dist_u.lazy_covariance_matrix.logdet(),
                # tr(K^-1 S) = tr(K^-1 K var_dist_covar K) = tr(K var_dist_covar)
                self.covar_trace(),
                # (m - \mu_u)^T K^-1 (m - \mu_u)
                # = (K^-1 (m - \mu_u))^T K (K^-1 (m - \mu_u))
                # = (var_dist_mean)^T K (var_dist_mean)
                self.mean_diff_inv_quad(),
# d
-prior_dist.event_shape.numel(),
]
)
return kl_divergence
def initialize_variational_dist(self):
prior_dist = self.prior_distribution
inv_prior_dist = torch.distributions.MultivariateNormal(
prior_dist.mean,
prior_dist.lazy_covariance_matrix.add_jitter()
.evaluate()
.double()
.inverse()
.type_as(prior_dist.covariance_matrix),
)
self.variational_distribution.initialize_variational_distribution(inv_prior_dist)
def forward(self, x):
r"""
The :func:`~gpytorch.variational.VariationalStrategy.forward` method determines how to marginalize out the
inducing point function values. Specifically, forward defines how to transform a variational distribution
over the inducing point values, :math:`q(u)`, in to a variational distribution over the function values at
specified locations x, :math:`q(f|x)`, by integrating :math:`\int p(f|x, u)q(u)du`
:param torch.Tensor x: Locations x to get the variational posterior of the function values at.
:rtype: ~gpytorch.distributions.MultivariateNormal
:return: The distribution :math:`q(f|x)`
"""
variational_dist = self.variational_distribution
inducing_points = self.inducing_points
if inducing_points.dim() < x.dim():
inducing_points = inducing_points.expand(*x.shape[:-2], *inducing_points.shape[-2:])
if len(variational_dist.batch_shape) < x.dim() - 2:
variational_dist = variational_dist.expand(x.shape[:-2])
# If our points equal the inducing points, we're done
if torch.equal(x, inducing_points):
# De-whiten the prior covar
prior_covar = self.prior_distribution.lazy_covariance_matrix
if isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor):
predictive_covar = RootLazyTensor(prior_covar @ variational_dist.lazy_covariance_matrix.root.evaluate())
else:
predictive_covar = MatmulLazyTensor(prior_covar @ variational_dist.covariance_matrix, prior_covar)
# Cache some values for the KL divergence
if self.training:
self._mean_diff_inv_quad_memo, self._logdet_memo = prior_covar.inv_quad_logdet(
(variational_dist.mean - self.prior_distribution.mean), logdet=True
)
return MultivariateNormal(variational_dist.mean, predictive_covar)
# Otherwise, we have to marginalize
else:
num_induc = inducing_points.size(-2)
full_inputs = torch.cat([inducing_points, x], dim=-2)
full_output = self.model.forward(full_inputs)
full_mean, full_covar = full_output.mean, full_output.lazy_covariance_matrix
# Mean terms
test_mean = full_mean[..., num_induc:]
induc_mean = full_mean[..., :num_induc]
mean_diff = (variational_dist.mean - induc_mean).unsqueeze(-1)
# Covariance terms
induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
data_data_covar = full_covar[..., num_induc:, num_induc:]
# If we're less than a certain size, we'll compute the Cholesky decomposition of induc_induc_covar
cholesky = False
if settings.fast_computations.log_prob.off() or (num_induc <= settings.max_cholesky_size.value()):
induc_induc_covar = CholLazyTensor(induc_induc_covar.cholesky())
cholesky = True
# Cache the CG results
# Do not use preconditioning for whitened VI, as it does not seem to improve performance.
with settings.max_preconditioner_size(0):
with torch.no_grad():
eager_rhs = torch.cat([induc_data_covar, mean_diff], -1)
solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = CachedCGLazyTensor.precompute_terms(
induc_induc_covar,
eager_rhs.detach(),
logdet_terms=(not cholesky),
include_tmats=(not settings.skip_logdet_forward.on() and not cholesky),
)
eager_rhss = [eager_rhs.detach()]
solves = [solve.detach()]
if settings.skip_logdet_forward.on() and self.training:
eager_rhss.append(torch.cat([probe_vecs, eager_rhs], -1))
solves.append(torch.cat([probe_vec_solves, solve[..., : eager_rhs.size(-1)]], -1))
elif not self.training:
eager_rhss.append(eager_rhs[..., :-1])
solves.append(solve[..., :-1])
induc_induc_covar = CachedCGLazyTensor(
induc_induc_covar,
eager_rhss=eager_rhss,
solves=solves,
probe_vectors=probe_vecs,
probe_vector_norms=probe_vec_norms,
probe_vector_solves=probe_vec_solves,
probe_vector_tmats=tmats,
)
            # Compute some terms that will be necessary for the predictive covariance and KL divergence
if self.training:
interp_data_data_var_plus_mean_diff_inv_quad, logdet = induc_induc_covar.inv_quad_logdet(
torch.cat([induc_data_covar, mean_diff], -1), logdet=True, reduce_inv_quad=False
)
interp_data_data_var = interp_data_data_var_plus_mean_diff_inv_quad[..., :-1]
mean_diff_inv_quad = interp_data_data_var_plus_mean_diff_inv_quad[..., -1]
# Compute predictive mean
predictive_mean = torch.add(
test_mean,
induc_induc_covar.inv_matmul(mean_diff, left_tensor=induc_data_covar.transpose(-1, -2)).squeeze(-1),
)
# Compute the predictive covariance
is_root_lt = isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor)
is_repeated_root_lt = isinstance(
variational_dist.lazy_covariance_matrix, BatchRepeatLazyTensor
) and isinstance(variational_dist.lazy_covariance_matrix.base_lazy_tensor, RootLazyTensor)
if is_root_lt:
predictive_covar = RootLazyTensor(
induc_data_covar.transpose(-1, -2) @ variational_dist.lazy_covariance_matrix.root.evaluate()
)
elif is_repeated_root_lt:
predictive_covar = RootLazyTensor(
induc_data_covar.transpose(-1, -2)
@ variational_dist.lazy_covariance_matrix.root_decomposition().root.evaluate()
)
else:
                # use the variational covariance S here; `predictive_covar`
                # is not yet defined on this branch
                predictive_covar = MatmulLazyTensor(
                    induc_data_covar.transpose(-1, -2),
                    variational_dist.lazy_covariance_matrix @ induc_data_covar,
                )
if self.training:
data_covariance = DiagLazyTensor((data_data_covar.diag() - interp_data_data_var).clamp(0, math.inf))
else:
neg_induc_data_data_covar = torch.matmul(
induc_data_covar.transpose(-1, -2).mul(-1), induc_induc_covar.inv_matmul(induc_data_covar)
)
data_covariance = data_data_covar + neg_induc_data_data_covar
predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
# Save the logdet, mean_diff_inv_quad, prior distribution for the ELBO
if self.training:
self._memoize_cache["prior_distribution_memo"] = MultivariateNormal(induc_mean, induc_induc_covar)
self._memoize_cache["logdet_memo"] = -logdet
self._memoize_cache["mean_diff_inv_quad_memo"] = mean_diff_inv_quad
return MultivariateNormal(predictive_mean, predictive_covar)
def __call__(self, x, prior=False):
# If we're in prior mode, then we're done!
if prior:
return self.model.forward(x)
# Delete previously cached items from the training distribution
if self.training:
if hasattr(self, "_memoize_cache"):
delattr(self, "_memoize_cache")
self._memoize_cache = dict()
# (Maybe) initialize variational distribution
if not self.variational_params_initialized.item():
prior_dist = self.prior_distribution
self._variational_distribution.initialize_variational_distribution(prior_dist)
self.variational_params_initialized.fill_(1)
return Module.__call__(self, x)
| [
"torch.cat",
"torch.equal",
"torch.no_grad"
] | 1.3 | Xiao-dong-Wang/gpytorch | 92e07cf4dae26083fe0aed926e1dfd483443924e |
1.9 | """Structural Regularization for """
from torch.nn.parameter import Parameter
from allennlp.common import Registrable
from allennlp.data.vocabulary import Vocabulary
from pathlib import Path
from networkx.exception import NetworkXException
from typing import List, Tuple, Union, Dict, Any, Optional
import torch
import networkx as nx
import logging
from box_mlc.dataset_readers.hierarchy_readers.hierarchy_reader import (
HierarchyReader,
)
logger = logging.getLogger(__name__)
class HierarchyRegularizer(torch.nn.Module, Registrable):
"""Base class to satisfy Registrable and to define the common hierarchy initializations"""
def __init__(
self,
alpha: float,
hierarchy_reader: HierarchyReader,
debug_level: int = 0,
) -> None:
"""
Args:
alpha: The regularization parameter that is multiplied with the hierarchy struct loss.
hierarchy_reader: Creates the adjacency_matrix and the mask
debug_level: scale of 0 to 3. 0 meaning no-debug (fastest) and 3 highest debugging possible (slowest).
Returns: (None)
"""
super().__init__() # type:ignore
self.alpha = alpha
self.debug_level = debug_level
self.adjacency_matrix = Parameter(
hierarchy_reader.adjacency_matrix, requires_grad=False
) #: Adj(i,j) =1 => if j is true, i is true.
# self.mask = Parameter(self.initialize_mask(), requires_grad=False) #type: torch.Tensor
self.mask: torch.BoolTensor = ( # pylint: disable
hierarchy_reader.mask # type:ignore
) # noqa
def to(self, *args, **kwargs): # type: ignore # noqa
"""Deligates to `torch.nn.Module.to`. Additionally moves `self.mask` to the correct device.
This is needed because we depend on to() to move the all tensors and params to appropriate device.
Args:
args: same as super class
kwargs: same as super class
"""
super().to(*args, **kwargs)
(
device,
dtype,
non_blocking,
convert_to_format,
) = torch._C._nn._parse_to(*args, **kwargs)
        # Tensor.to() is not in-place, so the result must be reassigned
        self.mask = self.mask.to(device=device)
def get_active_adjacency_matrix_and_mask(
self, active_mask: torch.BoolTensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
active_mask: 1D Boolean Tensor of shape (adj.shape[0],) indicating which rows and columns to take.
Returns:
torch.Tensor: masked adj matrix
torch.Tensor: masked mask
"""
assert len(active_mask.shape) == 1
assert active_mask.shape[0] == self.adjacency_matrix.shape[0]
num_active = torch.sum(active_mask)
active_mask_float = active_mask.to(dtype=torch.float)
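        # the outer product of the boolean mask selects exactly the submatrix
        # whose rows and columns are both active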
active_mask_matrix = torch.ger(
active_mask_float, active_mask_float
).to(dtype=torch.bool)
return (self.adjacency_matrix[active_mask_matrix]).reshape(
num_active, num_active
), (self.mask[active_mask_matrix]).reshape(num_active, num_active)
| [
"torch.ger",
"torch._C._nn._parse_to",
"torch.nn.parameter.Parameter",
"torch.sum"
] | 1.9.0 | iesl/box-mlc | 15439b7e46885458d0c45d530c17f1deac0398f8 |
1.2 | import torch
from src.solver import BaseSolver
from src.asr import ASR
from src.optim import Optimizer
from src.data import load_dataset
from src.util import human_format, cal_er, feat_to_fig
class Solver(BaseSolver):
''' Solver for training'''
def __init__(self, config, paras, mode):
super().__init__(config, paras, mode)
# Logger settings
self.best_wer = {'att': 3.0, 'ctc': 3.0}
# Curriculum learning affects data loader
self.curriculum = self.config['hparas']['curriculum']
def fetch_data(self, data):
''' Move data to device and compute text seq. length'''
_, feat, feat_len, txt = data
feat = feat.to(self.device)
feat_len = feat_len.to(self.device)
txt = txt.to(self.device)
txt_len = torch.sum(txt != 0, dim=-1)
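        # token id 0 is assumed to be padding, so this counts non-pad tokens
        # (consistent with ignore_index=0 in the losses below)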
return feat, feat_len, txt, txt_len
def load_data(self):
''' Load data for training/validation, store tokenizer and input/output shape'''
self.tr_set, self.dv_set, self.feat_dim, self.vocab_size, self.tokenizer, msg = \
load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,
self.curriculum > 0, **self.config['data'])
self.verbose(msg)
def set_model(self):
''' Setup ASR model and optimizer '''
# Model
init_adadelta = self.config['hparas']['optimizer'] == 'Adadelta'
self.model = ASR(self.feat_dim, self.vocab_size, init_adadelta, **
self.config['model']).to(self.device)
self.verbose(self.model.create_msg())
model_paras = [{'params': self.model.parameters()}]
# Losses
self.seq_loss = torch.nn.CrossEntropyLoss(ignore_index=0)
# Note: zero_infinity=False is unstable?
self.ctc_loss = torch.nn.CTCLoss(blank=0, zero_infinity=False)
# Plug-ins
self.emb_fuse = False
self.emb_reg = ('emb' in self.config) and (
self.config['emb']['enable'])
if self.emb_reg:
from src.plugin import EmbeddingRegularizer
self.emb_decoder = EmbeddingRegularizer(
self.tokenizer, self.model.dec_dim, **self.config['emb']).to(self.device)
model_paras.append({'params': self.emb_decoder.parameters()})
self.emb_fuse = self.emb_decoder.apply_fuse
if self.emb_fuse:
self.seq_loss = torch.nn.NLLLoss(ignore_index=0)
self.verbose(self.emb_decoder.create_msg())
# Optimizer
self.optimizer = Optimizer(model_paras, **self.config['hparas'])
self.verbose(self.optimizer.create_msg())
# Enable AMP if needed
self.enable_apex()
# Automatically load pre-trained model if self.paras.load is given
self.load_ckpt()
# ToDo: other training methods
def exec(self):
''' Training End-to-end ASR system '''
self.verbose('Total training steps {}.'.format(
human_format(self.max_step)))
ctc_loss, att_loss, emb_loss = None, None, None
n_epochs = 0
self.timer.set()
while self.step < self.max_step:
# Renew dataloader to enable random sampling
if self.curriculum > 0 and n_epochs == self.curriculum:
self.verbose(
'Curriculum learning ends after {} epochs, starting random sampling.'.format(n_epochs))
self.tr_set, _, _, _, _, _ = \
load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,
False, **self.config['data'])
for data in self.tr_set:
# Pre-step : update tf_rate/lr_rate and do zero_grad
tf_rate = self.optimizer.pre_step(self.step)
total_loss = 0
# Fetch data
feat, feat_len, txt, txt_len = self.fetch_data(data)
self.timer.cnt('rd')
# Forward model
# Note: txt should NOT start w/ <sos>
ctc_output, encode_len, att_output, att_align, dec_state = \
self.model(feat, feat_len, max(txt_len), tf_rate=tf_rate,
teacher=txt, get_dec_state=self.emb_reg)
# Plugins
if self.emb_reg:
emb_loss, fuse_output = self.emb_decoder(
dec_state, att_output, label=txt)
total_loss += self.emb_decoder.weight*emb_loss
# Compute all objectives
if ctc_output is not None:
if self.paras.cudnn_ctc:
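                        # the cuDNN CTC kernel requires targets in concatenated
                        # int32 format (kept on CPU here); to_sparse().values()
                        # flattens out the non-pad tokens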
ctc_loss = self.ctc_loss(ctc_output.transpose(0, 1),
txt.to_sparse().values().to(device='cpu', dtype=torch.int32),
[ctc_output.shape[1]] *
len(ctc_output),
txt_len.cpu().tolist())
else:
ctc_loss = self.ctc_loss(ctc_output.transpose(
0, 1), txt, encode_len, txt_len)
total_loss += ctc_loss*self.model.ctc_weight
if att_output is not None:
b, t, _ = att_output.shape
att_output = fuse_output if self.emb_fuse else att_output
att_loss = self.seq_loss(
att_output.contiguous().view(b*t, -1), txt.contiguous().view(-1))
total_loss += att_loss*(1-self.model.ctc_weight)
self.timer.cnt('fw')
# Backprop
grad_norm = self.backward(total_loss)
self.step += 1
# Logger
if (self.step == 1) or (self.step % self.PROGRESS_STEP == 0):
self.progress('Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'
.format(total_loss.cpu().item(), grad_norm, self.timer.show()))
self.write_log(
'loss', {'tr_ctc': ctc_loss, 'tr_att': att_loss})
self.write_log('emb_loss', {'tr': emb_loss})
self.write_log('wer', {'tr_att': cal_er(self.tokenizer, att_output, txt),
'tr_ctc': cal_er(self.tokenizer, ctc_output, txt, ctc=True)})
if self.emb_fuse:
if self.emb_decoder.fuse_learnable:
self.write_log('fuse_lambda', {
'emb': self.emb_decoder.get_weight()})
self.write_log(
'fuse_temp', {'temp': self.emb_decoder.get_temp()})
# Validation
if (self.step == 1) or (self.step % self.valid_step == 0):
self.validate()
# End of step
# https://github.com/pytorch/pytorch/issues/13246#issuecomment-529185354
torch.cuda.empty_cache()
self.timer.set()
if self.step > self.max_step:
break
n_epochs += 1
self.log.close()
def validate(self):
# Eval mode
self.model.eval()
if self.emb_decoder is not None:
self.emb_decoder.eval()
dev_wer = {'att': [], 'ctc': []}
for i, data in enumerate(self.dv_set):
self.progress('Valid step - {}/{}'.format(i+1, len(self.dv_set)))
# Fetch data
feat, feat_len, txt, txt_len = self.fetch_data(data)
# Forward model
with torch.no_grad():
ctc_output, encode_len, att_output, att_align, dec_state = \
self.model(feat, feat_len, int(max(txt_len)*self.DEV_STEP_RATIO),
emb_decoder=self.emb_decoder)
dev_wer['att'].append(cal_er(self.tokenizer, att_output, txt))
dev_wer['ctc'].append(cal_er(self.tokenizer, ctc_output, txt, ctc=True))
# Show some example on tensorboard
if i == len(self.dv_set)//2:
for i in range(min(len(txt), self.DEV_N_EXAMPLE)):
if self.step == 1:
self.write_log('true_text{}'.format(
i), self.tokenizer.decode(txt[i].tolist()))
if att_output is not None:
self.write_log('att_align{}'.format(i), feat_to_fig(
att_align[i, 0, :, :].cpu().detach()))
self.write_log('att_text{}'.format(i), self.tokenizer.decode(
att_output[i].argmax(dim=-1).tolist()))
if ctc_output is not None:
self.write_log('ctc_text{}'.format(i), self.tokenizer.decode(ctc_output[i].argmax(dim=-1).tolist(),
ignore_repeat=True))
# Ckpt if performance improves
for task in ['att', 'ctc']:
dev_wer[task] = sum(dev_wer[task])/len(dev_wer[task])
if dev_wer[task] < self.best_wer[task]:
self.best_wer[task] = dev_wer[task]
self.save_checkpoint('best_{}.pth'.format(task), 'wer', dev_wer[task])
self.write_log('wer', {'dv_'+task: dev_wer[task]})
self.save_checkpoint('latest.pth', 'wer', dev_wer['att'], show_msg=False)
# Resume training
self.model.train()
if self.emb_decoder is not None:
self.emb_decoder.train()
| [
"torch.nn.NLLLoss",
"torch.no_grad",
"torch.nn.CTCLoss",
"torch.cuda.empty_cache",
"torch.nn.CrossEntropyLoss",
"torch.sum"
] | 1.2.0 | voidism/End-to-end-ASR-Pytorch | 509c389fa6ab98c30e227c6f4c8f7474adbc1bb2 |
1.10 | # An implementation of the PPO (Proximal Policy Optimization) algorithm
# reference to: https://github.com/nikhilbarhate99/PPO-PyTorch
import torch
import torch.nn as nn
from torch.optim import Adam, RMSprop
from torch.distributions import Categorical
from torch.utils.tensorboard.writer import SummaryWriter
from torch.utils.data import BatchSampler, RandomSampler
from typing import Tuple
# this class implements an actor critic model with linear networks
class ActorCritic(nn.Module):
def __init__(self, state_dimension, action_dimension):
super().__init__()
# save dimensions
self.d_state = state_dimension
self.d_action = action_dimension
# create actor network
self.actor = nn.Sequential(
nn.Linear(self.d_state, 1024),
nn.LeakyReLU(),
nn.Linear(1024, 512),
nn.LeakyReLU(),
nn.Linear(512, 256),
nn.LeakyReLU(),
nn.Linear(256, 128),
nn.LeakyReLU(),
nn.Linear(128, self.d_action),
nn.Softmax(dim=1)
)
# create critic network
self.critic = nn.Sequential(
nn.Linear(self.d_state, 1024),
nn.LeakyReLU(),
nn.Linear(1024, 512),
nn.LeakyReLU(),
nn.Linear(512, 256),
nn.LeakyReLU(),
nn.Linear(256, 128),
nn.LeakyReLU(),
nn.Linear(128, 64),
nn.LeakyReLU(),
nn.Linear(64, 1)
)
def forward(self, x):
"""
        Identity forward; the actor and critic are invoked directly via
        `action` and `evaluate`.
"""
return x
def action(self, state) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Get action and log probs
"""
# get probabilities of actions
probs = self.actor(state)
dist = Categorical(probs=probs)
# sample an action
action = dist.sample()
logprob = dist.log_prob(action)
return action.detach(), logprob.detach()
def evaluate(self, state, action) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Evaluates an action
"""
# get probabilities of actions
probs = self.actor(state)
dist = Categorical(probs=probs)
# get distribution entropy and log probs of chosen action
entropy = dist.entropy()
logprob = dist.log_prob(action).diagonal().view(action.shape)
# get critic value
critics = self.critic(state)
return entropy, logprob, critics
# this structure stores buffer info for PPO
class PPOBuffer:
def __init__(self):
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
def reset(self):
del self.actions[:]
del self.states[:]
del self.logprobs[:]
del self.rewards[:]
del self.is_terminals[:]
self.actions = []
self.states = []
self.logprobs = []
self.rewards = []
self.is_terminals = []
def isEmpty(self):
return len(self.actions) <= 0
# this class implements PPO model
# with actor and critic updated individually
class PPO(object):
def __init__(self,
state_dimension, action_dimension,
lr_actor, lr_critic,
num_epochs, discount,
eps_clip, batch_size,
max_grad_norm, train
):
self.discount = discount
self.num_epochs = num_epochs
self.eps_clip = eps_clip
self.lr_actor = lr_actor
self.lr_critic = lr_critic
self.batch_size = batch_size
self.max_grad_norm = max_grad_norm
self.training = train
self.iter_count = 0
# create buffer
self.buffer = PPOBuffer()
# select running environment for train
self.device = "cuda" if torch.cuda.is_available() else "cpu"
# create actor critic model
self.AC = ActorCritic(state_dimension, action_dimension).to(self.device)
# set optimizer
self.optim_actor = Adam(self.AC.actor.parameters(), lr_actor)
self.optim_critic = Adam(self.AC.critic.parameters(), lr_critic)
# set saved model
self.AC_saved = ActorCritic(state_dimension, action_dimension).to(self.device)
self.AC_saved.load_state_dict(self.AC.state_dict())
self.AC_saved.eval()
# set loss function
self.loss = nn.MSELoss()
def action(self, state):
"""
Choose next action
"""
with torch.no_grad():
# get new action from actor
state = torch.FloatTensor(state).to(self.device)
action, logprob = self.AC_saved.action(state)
# store into buffer
if self.training:
self.buffer.states.append(state)
self.buffer.actions.append(action)
self.buffer.logprobs.append(logprob)
return action.cpu().item()
def save(self, filename):
"""
Save current network to file path
"""
torch.save((
self.AC_saved.state_dict(),
self.optim_actor.state_dict(),
self.optim_critic.state_dict(),
), filename)
def load(self, filename):
"""
Load network from file path
"""
states, opt1, opt2 = torch.load(filename, map_location=lambda storage, _: storage)
self.AC.load_state_dict(states)
self.AC_saved.load_state_dict(states)
self.optim_actor.load_state_dict(opt1)
self.optim_critic.load_state_dict(opt2)
    def train(self, writer: SummaryWriter):
"""
Update policy
"""
if not self.training: return
if self.buffer.isEmpty(): return
rewards = []
reward_disc = 0.0
for reward, is_terminal in zip(reversed(self.buffer.rewards), reversed(self.buffer.is_terminals)):
# if is terminal state, set reward to 0
if is_terminal:
reward_disc = 0.0
reward_disc = reward + (self.discount * reward_disc)
rewards.insert(0, reward_disc)
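        # Illustrative example: with discount=0.9, rewards [1, 0, 1] and the
        # last step terminal, the discounted returns computed above are
        # [1 + 0.9 * 0.9, 0.9 * 1.0, 1.0] = [1.81, 0.9, 1.0].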
length = len(rewards)
# normalize the rewards
target_values = torch.FloatTensor(rewards)
target_values = (target_values - target_values.mean()) / (target_values.std() + 1e-8)
target_values = target_values.view(-1, 1)
# convert list to tensor
old_states = torch.squeeze(torch.stack(self.buffer.states[:length], dim=0)).detach()
old_actions = torch.squeeze(torch.stack(self.buffer.actions[:length], dim=0)).view(-1, 1).detach()
old_logprobs = torch.squeeze(torch.stack(self.buffer.logprobs[:length], dim=0)).view(-1, 1).detach()
# start training
self.AC.train()
torch.cuda.empty_cache()
for _ in range(self.num_epochs):
for indices in BatchSampler(RandomSampler(range(length)), batch_size=self.batch_size, drop_last=False):
target_values_gpu = target_values[indices].to(self.device)
old_states_gpu = old_states[indices].to(self.device)
old_actions_gpu = old_actions[indices].to(self.device)
old_logprobs_gpu = old_logprobs[indices].to(self.device)
# get critics
_, logprob, state_values = self.AC.evaluate(old_states_gpu, old_actions_gpu)
# state_values = torch.squeeze(critics)
# compute advantages
advantages = (target_values_gpu - state_values).detach()
# find the ratio (pi_theta / pi_theta__old)
ratios = torch.exp((logprob - old_logprobs_gpu))
# find Surrogate Loss (Clipped Surrogate Objective)
surr1 = ratios * advantages
surr2 = torch.clamp(ratios, 1 - self.eps_clip, 1 + self.eps_clip) * advantages
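                # PPO clipped surrogate objective:
                # L^CLIP = E[min(r * A, clip(r, 1 - eps, 1 + eps) * A)],
                # maximized, hence the negated mean below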
# compute actor loss
loss_actor = -torch.min(surr1, surr2).mean()
# optimize actor
self.optim_actor.zero_grad()
loss_actor.backward()
torch.nn.utils.clip_grad.clip_grad_norm_(self.AC.actor.parameters(), max_norm=self.max_grad_norm)
self.optim_actor.step()
# compute critic loss
loss_critic = self.loss(state_values, target_values_gpu).mean()
self.optim_critic.zero_grad()
loss_critic.backward()
torch.nn.utils.clip_grad.clip_grad_norm_(self.AC.critic.parameters(), max_norm=self.max_grad_norm)
self.optim_critic.step()
# log in tensorboard
writer.add_scalar("PPO/Loss Actor", loss_actor.cpu().detach().item(), self.iter_count)
writer.add_scalar("PPO/Loss Critic", loss_critic.cpu().detach().item(), self.iter_count)
writer.add_scalar("PPO/Advantage", advantages.cpu().detach().mean().item(), self.iter_count)
self.iter_count += 1
self.AC.eval()
# save weights after training
self.AC_saved.load_state_dict(self.AC.state_dict())
self.AC_saved.eval()
# clear buffer
self.buffer.reset()
def update(self, reward, is_terminal):
"""
Update buffer
"""
if not self.training: return
self.buffer.rewards.append(reward)
self.buffer.is_terminals.append(is_terminal) | [
"torch.nn.Linear",
"torch.distributions.Categorical",
"torch.stack",
"torch.nn.MSELoss",
"torch.nn.Softmax",
"torch.min",
"torch.FloatTensor",
"torch.nn.LeakyReLU",
"torch.no_grad",
"torch.clamp",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.load",
"torch.exp"
] | 1.10 | fightZero/fightZero | 84c2f76c7dda31837d002e47cd74936044251079 |
1.1 | import argparse
import pickle
import torch
import os
import numpy as np
from src.models.api import EvaluationModel, NegSampleGenerator
from torch import nn
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class DistMult:
def get_score(self, head: torch.tensor, relation: torch.tensor,
tail: torch.tensor, mode: str) -> torch.tensor:
"""
Computes Scores for head, relation, tail triples with DistMult model
:param head: torch.tensor, dtype: int, shape: (batch_size, sample_size,
entity_dim)
:param relation: torch.tensor, dtype: int, shape: (batch_size,
sample_size, relation_dim)
:param tail: torch.tensor, dtype: int, shape: (batch_size, sample_size,
entity_dim)
        :param mode: str ('single', 'head-batch' or 'tail-batch')
:return: torch.tensor, dtype: float, shape: (batch_size, num_entities)
"""
if mode == 'head-batch':
score = head * (relation * tail)
else:
score = (head * relation) * tail
score = score.sum(dim=2)
return score
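# Minimal usage sketch (illustrative; `entity_emb`, `rel_emb` and the id
# tensors are assumptions, not part of this module):
#   head = entity_emb[h_ids].unsqueeze(1)       # (batch, 1, dim)
#   relation = rel_emb[r_ids].unsqueeze(1)      # (batch, 1, dim)
#   tail = entity_emb[candidate_ids]            # (batch, n_candidates, dim)
#   scores = DistMult().get_score(head, relation, tail, mode='single')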
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Load trained model and use it for predictions'
)
parser.add_argument('-m', '--model', type=str, default=None)
parser.add_argument('-d', '--data', type=str, default=None)
parser.add_argument('-o', '--output_path', type=str, default=None)
return parser.parse_args(args)
def load_data(data_path):
path_train = os.path.join(data_path, 'train.pickle')
with open(path_train, 'rb') as handle:
train = pickle.load(handle)
path_valid = os.path.join(data_path, 'valid.pickle')
with open(path_valid, 'rb') as handle:
valid = pickle.load(handle)
path_test = os.path.join(data_path, 'test.pickle')
with open(path_test, 'rb') as handle:
test = pickle.load(handle)
return train, valid, test
def main(args):
"""
Load trained model and use it for predictions.
"""
if args.model is None or args.data is None:
raise ValueError('You have to specify model and data input paths.')
# load data
train_triples, valid_triples, test_triples = load_data(args.data)
# create model and load already trained embeddings
all_true_triples = np.concatenate([train_triples, valid_triples,
test_triples], axis=0)
neg_sample_generator = NegSampleGenerator(all_true_triples,
create_filter_bias=True)
model = EvaluationModel(model_class=DistMult(),
neg_sample_generator=neg_sample_generator)
path = os.path.join(args.model, 'entity_embedding.npy')
new_entity_embedding = nn.Parameter(torch.from_numpy(np.load(path)))
path = os.path.join(args.model, 'relation_embedding.npy')
new_relation_embedding = nn.Parameter(torch.from_numpy(np.load(path)))
# only True if embeddings of RotatE are used
if new_entity_embedding.shape[1] != new_relation_embedding.shape[1]:
stop = new_relation_embedding.shape[1]
new_entity_embedding = new_entity_embedding[:, :stop]
new_entity_embedding = nn.Parameter(new_entity_embedding)
model.change_entity_embedding(new_entity_embedding.cuda())
model.change_relation_embedding(new_relation_embedding.cuda())
model.cuda()
model.eval()
# use API to evaluate model and generate model output for error analysis
s = torch.tensor(test_triples[:, 0]).cuda()
p = torch.tensor(test_triples[:, 1]).cuda()
o = torch.tensor(test_triples[:, 2]).cuda()
evaluation_result = model.evaluate(s, p, o, batch_size=4)
if args.output_path is not None:
model.generate_model_output(output_path=args.output_path,
test_triples=test_triples,
evaluation_result=evaluation_result)
if __name__ == '__main__':
main(parse_args())
| [
"torch.tensor",
"torch.nn.Parameter"
] | 1.1.0 | wang-yuhao/Practical-Big-Data-Science-ADL-AI | 0bf63bf210f506e287f8492e716bb3394137d74b |
1.1 | # coding=utf-8
import torch
from torch import nn
import numpy as np
from src.models.api import AbstractModel, evaluate
class DistMult(AbstractModel):
def __init__(
self,
num_entities: int,
num_relations: int,
embedding_dim: int,
):
super(DistMult, self).__init__(
num_entities=num_entities,
num_relations=num_relations
)
self.entity_embedding = nn.Embedding(
num_embeddings=num_entities,
embedding_dim=embedding_dim
)
self.relation_embedding = nn.Embedding(
num_embeddings=num_relations,
embedding_dim=embedding_dim
)
def score_subjects(
self,
p: torch.tensor,
o: torch.tensor,
) -> torch.tensor:
p_emb = self.relation_embedding(p)
o_emb = self.entity_embedding(o)
all_emb = self.entity_embedding.weight.data
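        # broadcast the full (num_entities, dim) embedding table against the
        # query so every candidate subject is scored at once:
        # score(e) = <e, p_emb, o_emb>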
return torch.sum(
all_emb * p_emb[None, :] * o_emb[None, :],
dim=-1
)
def score_objects(
self,
s: torch.tensor,
p: torch.tensor,
) -> torch.tensor:
s_emb = self.entity_embedding(s)
p_emb = self.relation_embedding(p)
all_emb = self.entity_embedding.weight.data
return torch.sum(
s_emb[None, :] * p_emb[None, :] * all_emb,
dim=-1
)
def forward(self, *inputs):
        raise NotImplementedError
if __name__ == '__main__':
model = DistMult(num_entities=128, num_relations=16, embedding_dim=64)
n_triples = 256
device = torch.device('cpu')
sbjs = np.random.randint(
model.num_entities,
size=(n_triples,),
dtype=np.int32
)
pred = np.random.randint(
        model.num_relations,
size=(n_triples,),
dtype=np.int32
)
objs = np.random.randint(
model.num_entities,
size=(n_triples,),
dtype=np.int32
)
fake_triples = np.stack([sbjs, pred, objs], axis=-1)
results = evaluate(triples=fake_triples, model=model, device=device)
print(results)
| [
"torch.device",
"torch.nn.Embedding",
"torch.sum"
] | 1.1.0 | wang-yuhao/Practical-Big-Data-Science-ADL-AI | 0bf63bf210f506e287f8492e716bb3394137d74b |
1.6 | import time
import torch
from tqdm import tqdm
import pytorch_warmup as warmup
import numpy as np
import random
import cv2
from lanedet.models.registry import build_net
from .registry import build_trainer, build_evaluator
from .optimizer import build_optimizer
from .scheduler import build_scheduler
from lanedet.datasets import build_dataloader
from lanedet.utils.recorder import build_recorder
from lanedet.utils.net_utils import save_model, load_network
class Runner(object):
def __init__(self, cfg):
torch.manual_seed(cfg.seed)
np.random.seed(cfg.seed)
random.seed(cfg.seed)
self.cfg = cfg
self.recorder = build_recorder(self.cfg)
self.net = build_net(self.cfg)
# self.net.to(torch.device('cuda'))
self.net = torch.nn.parallel.DataParallel(
self.net, device_ids = range(self.cfg.gpus)).cuda()
self.recorder.logger.info('Network: \n' + str(self.net))
self.resume()
self.optimizer = build_optimizer(self.cfg, self.net)
self.scheduler = build_scheduler(self.cfg, self.optimizer)
self.warmup_scheduler = None
# TODO(zhengtu): remove this hard code
if self.cfg.optimizer.type == 'SGD':
self.warmup_scheduler = warmup.LinearWarmup(
self.optimizer, warmup_period=5000)
self.metric = 0.
self.val_loader = None
def resume(self):
if not self.cfg.load_from and not self.cfg.finetune_from:
return
load_network(self.net, self.cfg.load_from,
finetune_from=self.cfg.finetune_from, logger=self.recorder.logger)
def to_cuda(self, batch):
for k in batch:
if k == 'meta':
continue
batch[k] = batch[k].cuda()
return batch
def train_epoch(self, epoch, train_loader):
self.net.train()
end = time.time()
max_iter = len(train_loader)
for i, data in enumerate(train_loader):
if self.recorder.step >= self.cfg.total_iter:
break
date_time = time.time() - end
self.recorder.step += 1
data = self.to_cuda(data)
output = self.net(data)
self.optimizer.zero_grad()
loss = output['loss']
loss.backward()
self.optimizer.step()
self.scheduler.step()
if self.warmup_scheduler:
self.warmup_scheduler.dampen()
batch_time = time.time() - end
end = time.time()
self.recorder.update_loss_stats(output['loss_stats'])
self.recorder.batch_time.update(batch_time)
self.recorder.data_time.update(date_time)
if i % self.cfg.log_interval == 0 or i == max_iter - 1:
lr = self.optimizer.param_groups[0]['lr']
self.recorder.lr = lr
self.recorder.record('train')
def train(self):
self.recorder.logger.info('Build train loader...')
train_loader = build_dataloader(self.cfg.dataset.train, self.cfg, is_train=True)
self.recorder.logger.info('Start training...')
for epoch in range(self.cfg.epochs):
self.recorder.epoch = epoch
self.train_epoch(epoch, train_loader)
if (epoch + 1) % self.cfg.save_ep == 0 or epoch == self.cfg.epochs - 1:
self.save_ckpt()
if (epoch + 1) % self.cfg.eval_ep == 0 or epoch == self.cfg.epochs - 1:
self.validate()
if self.recorder.step >= self.cfg.total_iter:
break
def validate(self):
if not self.val_loader:
self.val_loader = build_dataloader(self.cfg.dataset.val, self.cfg, is_train=False)
self.net.eval()
predictions = []
        for i, data in enumerate(tqdm(self.val_loader, desc='Validate')):
data = self.to_cuda(data)
with torch.no_grad():
output = self.net(data)
predictions.extend(output)
if self.cfg.view:
self.val_loader.dataset.view(predictions, data['meta'])
out = self.val_loader.dataset.evaluate(predictions, self.cfg.work_dir)
self.recorder.logger.info(out)
metric = out
if metric > self.metric:
self.metric = metric
self.save_ckpt(is_best=True)
self.recorder.logger.info('Best metric: ' + str(self.metric))
def save_ckpt(self, is_best=False):
save_model(self.net, self.optimizer, self.scheduler,
self.recorder, is_best)
| [
"torch.manual_seed",
"torch.no_grad"
] | 1.6.0 | zhangzhongshuai/lanedet | bff96fcbed122ac0f876d8e64ada7795ca34e4b6 |
1.4 | """Module for (demo) viewer."""
import os
from dataclasses import dataclass
from glob import glob
from logging import getLogger
from os.path import basename, join
from typing import List, Optional, Tuple
import cv2
import numpy as np
import seaborn as sns
import torch
import torch.cuda
import torchvision
from hydra.utils import to_absolute_path
from frcnn.labels import COCO91
from frcnn.models import FasterRCNN, fasterrcnn_resnet50_fpn
__all__ = ["ImageViewer"]
logger = getLogger(__name__)
ColorType = Tuple[int, int, int]
@dataclass
class BasicConfig:
gpu: bool
conf: float
display: bool
weights: Optional[str]
@dataclass
class ImageConfig:
root: str
outputs: str
@dataclass
class Config:
basic: BasicConfig
image: ImageConfig
@dataclass
class FasterRCNNOutput:
boxes: torch.Tensor
labels: torch.Tensor
scores: torch.Tensor
class ImageViewer:
COLORS: List[ColorType] = [
tuple(int(c * 255) for c in color) for color in sns.color_palette(n_colors=len(COCO91)) # type: ignore
]
def __init__(self, cfg: Config):
self._cfg = cfg
self._model = self._load_model(cfg.basic.weights)
self._paths = sorted(glob(join(to_absolute_path(cfg.image.root), "*")))
self._device = "cuda" if cfg.basic.gpu and torch.cuda.is_available() else "cpu"
os.makedirs(cfg.image.outputs, exist_ok=True)
@torch.no_grad()
def run(self):
self._model = self._model.to(self._device).eval()
for i, path in enumerate(self._paths):
image_bgr: np.ndarray = cv2.imread(path)
image_rgb: np.ndarray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
image_tensor: torch.Tensor = torchvision.transforms.functional.to_tensor(image_rgb).to(self._device)
# only the first element because input only one image
output = FasterRCNNOutput(**self._model([image_tensor])[0])
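            # torchvision detection models return one dict per input image,
            # with 'boxes' (N, 4), 'labels' (N,) and 'scores' (N,) tensors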
boxes = output.boxes.cpu().numpy()
labels = output.labels.cpu().numpy()
scores = output.scores.cpu().numpy()
logger.debug(
f"[{i + 1}/{len(self._paths)}] Detect {len([s for s in scores if s >= self._cfg.basic.conf]):2d} "
+ f"objects in {path}",
)
image_bgr = self._draw_results(image_bgr, boxes, labels, scores)
if self._cfg.basic.display:
cv2.imshow("", image_bgr)
cv2.waitKey(1)
cv2.imwrite(join(self._cfg.image.outputs, basename(path)), image_bgr)
@staticmethod
def _load_model(weights: Optional[str]) -> FasterRCNN:
logger.debug(f"Load weights: {weights}")
if weights is None:
model = fasterrcnn_resnet50_fpn(pretrained=True)
else:
model = fasterrcnn_resnet50_fpn(pretrained=False)
            # load_state_dict returns key-matching info, not the model,
            # so it must not be assigned back to `model`
            model.load_state_dict(torch.load(weights))
return model
def _draw_results(self, image: np.ndarray, boxes: np.ndarray, labels: np.ndarray, scores: np.ndarray) -> np.ndarray:
"""Draw texts and rectangles to the image (BGR)."""
for box, label, score in zip(boxes, labels, scores):
if score < self._cfg.basic.conf:
continue
image = cv2.putText(
image,
COCO91[label],
(round(box[0]), round(box[1])),
fontFace=cv2.FONT_HERSHEY_DUPLEX,
fontScale=1,
color=self.COLORS[label],
thickness=2,
)
image = cv2.rectangle(
image,
(round(box[0]), round(box[1])),
(round(box[2]), round(box[3])),
color=self.COLORS[label],
thickness=2,
)
return image
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
] | 1.4.0 | skmatz/frcnn | eae9d42f964a5883f72dc294984c019b3c75e837 |
1.6 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities and types for defining networks, these depend on PyTorch.
"""
import re
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Union
import torch
import torch.nn as nn
from monai.utils.deprecate_utils import deprecated_arg
from monai.utils.misc import ensure_tuple, set_determinism
from monai.utils.module import pytorch_after
__all__ = [
"one_hot",
"slice_channels",
"predict_segmentation",
"normalize_transform",
"to_norm_affine",
"normal_init",
"icnr_init",
"pixelshuffle",
"eval_mode",
"train_mode",
"copy_model_state",
"convert_to_torchscript",
]
def one_hot(labels: torch.Tensor, num_classes: int, dtype: torch.dtype = torch.float, dim: int = 1) -> torch.Tensor:
"""
For every value v in `labels`, the value in the output will be either 1 or 0. Each vector along the `dim`-th
dimension has the "one-hot" format, i.e., it has a total length of `num_classes`,
with a one and `num_class-1` zeros.
Note that this will include the background label, thus a binary mask should be treated as having two classes.
Args:
labels: input tensor of integers to be converted into the 'one-hot' format. Internally `labels` will be
converted into integers `labels.long()`.
num_classes: number of output channels, the corresponding length of `labels[dim]` will be converted to
`num_classes` from `1`.
dtype: the data type of the output one_hot label.
dim: the dimension to be converted to `num_classes` channels from `1` channel, should be non-negative number.
Example:
For a tensor `labels` of dimensions [B]1[spatial_dims], return a tensor of dimensions `[B]N[spatial_dims]`
when `num_classes=N` number of classes and `dim=1`.
.. code-block:: python
from monai.networks.utils import one_hot
import torch
a = torch.randint(0, 2, size=(1, 2, 2, 2))
out = one_hot(a, num_classes=2, dim=0)
print(out.shape) # torch.Size([2, 2, 2, 2])
a = torch.randint(0, 2, size=(2, 1, 2, 2, 2))
out = one_hot(a, num_classes=2, dim=1)
print(out.shape) # torch.Size([2, 2, 2, 2, 2])
"""
# if `dim` is bigger, add singleton dim at the end
if labels.ndim < dim + 1:
shape = list(labels.shape) + [1] * (dim + 1 - len(labels.shape))
labels = torch.reshape(labels, shape)
sh = list(labels.shape)
if sh[dim] != 1:
raise AssertionError("labels should have a channel with length equal to one.")
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
def slice_channels(tensor: torch.Tensor, *slicevals: Optional[int]) -> torch.Tensor:
slices = [slice(None)] * len(tensor.shape)
slices[1] = slice(*slicevals)
return tensor[slices]
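# Illustrative usage (editor's example, not part of the original module):
#   t = torch.rand(2, 6, 4)
#   slice_channels(t, 1, 4).shape  # torch.Size([2, 3, 4]) -- keeps channels 1..3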
def predict_segmentation(logits: torch.Tensor, mutually_exclusive: bool = False, threshold: float = 0.0) -> Any:
"""
    Given the logits from a network, compute the segmentation by thresholding all values above 0
    for a multi-label task, or by taking the `argmax` along the channel axis for a multi-class task.
    `logits` has shape `BCHW[D]`.
    Args:
        logits: raw data of model output.
        mutually_exclusive: if True, `logits` will be converted into a binary matrix using
            `argmax`, which is suitable for a multi-class task. Defaults to False.
        threshold: threshold for the prediction values in a multi-label task.
"""
if not mutually_exclusive:
return (logits >= threshold).int()
if logits.shape[1] == 1:
warnings.warn("single channel prediction, `mutually_exclusive=True` ignored, use threshold instead.")
return (logits >= threshold).int()
return logits.argmax(1, keepdim=True)
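# Illustrative usage (editor's sketch, not from the original source): with 3-class
# logits, `mutually_exclusive=True` collapses the channel axis via argmax, while the
# default treats every channel as an independent binary label.
#   logits = torch.randn(2, 3, 8, 8)                              # B=2, C=3
#   predict_segmentation(logits, mutually_exclusive=True).shape   # [2, 1, 8, 8]
#   predict_segmentation(logits).shape                            # [2, 3, 8, 8]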
def normalize_transform(
shape: Sequence[int],
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
align_corners: bool = False,
) -> torch.Tensor:
"""
Compute an affine matrix according to the input shape.
The transform normalizes the homogeneous image coordinates to the
range of `[-1, 1]`.
Args:
shape: input spatial shape
device: device on which the returned affine will be allocated.
dtype: data type of the returned affine
align_corners: if True, consider -1 and 1 to refer to the centers of the
corner pixels rather than the image corners.
See also: https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample
"""
norm = torch.tensor(shape, dtype=torch.float64, device=device) # no in-place change
if align_corners:
norm[norm <= 1.0] = 2.0
norm = 2.0 / (norm - 1.0)
norm = torch.diag(torch.cat((norm, torch.ones((1,), dtype=torch.float64, device=device))))
norm[:-1, -1] = -1.0
else:
norm[norm <= 0.0] = 2.0
norm = 2.0 / norm
norm = torch.diag(torch.cat((norm, torch.ones((1,), dtype=torch.float64, device=device))))
norm[:-1, -1] = 1.0 / torch.tensor(shape, dtype=torch.float64, device=device) - 1.0
norm = norm.unsqueeze(0).to(dtype=dtype)
norm.requires_grad = False
return norm
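# Worked example (editor's sketch): for shape=(4, 4) with align_corners=False the
# matrix is diag(0.5, 0.5, 1) with translation 1/4 - 1 = -0.75, so pixel coordinate
# 0 maps to -0.75 and pixel 3 maps to 0.75, i.e. pixel centers land at -1 + 1/W
# and 1 - 1/W as `grid_sample` expects.
#   normalize_transform((4, 4))
#   # tensor([[[0.5000, 0.0000, -0.7500],
#   #          [0.0000, 0.5000, -0.7500],
#   #          [0.0000, 0.0000,  1.0000]]], dtype=torch.float64)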
def to_norm_affine(
affine: torch.Tensor, src_size: Sequence[int], dst_size: Sequence[int], align_corners: bool = False
) -> torch.Tensor:
"""
Given ``affine`` defined for coordinates in the pixel space, compute the corresponding affine
for the normalized coordinates.
Args:
affine: Nxdxd batched square matrix
src_size: source image spatial shape
dst_size: target image spatial shape
align_corners: if True, consider -1 and 1 to refer to the centers of the
corner pixels rather than the image corners.
See also: https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample
Raises:
TypeError: When ``affine`` is not a ``torch.Tensor``.
ValueError: When ``affine`` is not Nxdxd.
ValueError: When ``src_size`` or ``dst_size`` dimensions differ from ``affine``.
"""
if not isinstance(affine, torch.Tensor):
raise TypeError(f"affine must be a torch.Tensor but is {type(affine).__name__}.")
if affine.ndimension() != 3 or affine.shape[1] != affine.shape[2]:
raise ValueError(f"affine must be Nxdxd, got {tuple(affine.shape)}.")
sr = affine.shape[1] - 1
if sr != len(src_size) or sr != len(dst_size):
raise ValueError(f"affine suggests {sr}D, got src={len(src_size)}D, dst={len(dst_size)}D.")
src_xform = normalize_transform(src_size, affine.device, affine.dtype, align_corners)
dst_xform = normalize_transform(dst_size, affine.device, affine.dtype, align_corners)
return src_xform @ affine @ torch.inverse(dst_xform)
def normal_init(
m, std: float = 0.02, normal_func: Callable[[torch.Tensor, float, float], Any] = torch.nn.init.normal_
) -> None:
"""
Initialize the weight and bias tensors of `m' and its submodules to values from a normal distribution with a
stddev of `std'. Weight tensors of convolution and linear modules are initialized with a mean of 0, batch
norm modules with a mean of 1. The callable `normal_func', used to assign values, should have the same arguments
as its default normal_(). This can be used with `nn.Module.apply` to visit submodules of a network.
"""
cname = m.__class__.__name__
if getattr(m, "weight", None) is not None and (cname.find("Conv") != -1 or cname.find("Linear") != -1):
normal_func(m.weight.data, 0.0, std)
if getattr(m, "bias", None) is not None:
nn.init.constant_(m.bias.data, 0.0)
elif cname.find("BatchNorm") != -1:
normal_func(m.weight.data, 1.0, std)
nn.init.constant_(m.bias.data, 0)
def icnr_init(conv, upsample_factor, init=nn.init.kaiming_normal_):
"""
    ICNR initialization for 2D/3D kernels, adapted from Aitken et al., 2017, "Checkerboard artifact
    free sub-pixel convolution".
"""
out_channels, in_channels, *dims = conv.weight.shape
scale_factor = upsample_factor ** len(dims)
oc2 = int(out_channels / scale_factor)
kernel = torch.zeros([oc2, in_channels] + dims)
kernel = init(kernel)
kernel = kernel.transpose(0, 1)
kernel = kernel.reshape(oc2, in_channels, -1)
kernel = kernel.repeat(1, 1, scale_factor)
kernel = kernel.reshape([in_channels, out_channels] + dims)
kernel = kernel.transpose(0, 1)
conv.weight.data.copy_(kernel)
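# Illustrative usage (editor's example): initialize a conv that feeds a 2x pixel
# shuffle; with ICNR the shuffle behaves like nearest-neighbour upsampling at
# initialization, which avoids checkerboard artifacts.
#   conv = nn.Conv2d(16, 16, kernel_size=3, padding=1)
#   icnr_init(conv, upsample_factor=2)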
@deprecated_arg(
name="dimensions", new_name="spatial_dims", since="0.6", msg_suffix="Please use `spatial_dims` instead."
)
def pixelshuffle(
x: torch.Tensor, spatial_dims: int, scale_factor: int, dimensions: Optional[int] = None
) -> torch.Tensor:
"""
Apply pixel shuffle to the tensor `x` with spatial dimensions `spatial_dims` and scaling factor `scale_factor`.
See: Shi et al., 2016, "Real-Time Single Image and Video Super-Resolution
    Using an Efficient Sub-Pixel Convolutional Neural Network."
See: Aitken et al., 2017, "Checkerboard artifact free sub-pixel convolution".
Args:
x: Input tensor
spatial_dims: number of spatial dimensions, typically 2 or 3 for 2D or 3D
scale_factor: factor to rescale the spatial dimensions by, must be >=1
.. deprecated:: 0.6.0
``dimensions`` is deprecated, use ``spatial_dims`` instead.
Returns:
Reshuffled version of `x`.
Raises:
ValueError: When input channels of `x` are not divisible by (scale_factor ** spatial_dims)
"""
if dimensions is not None:
spatial_dims = dimensions
dim, factor = spatial_dims, scale_factor
input_size = list(x.size())
batch_size, channels = input_size[:2]
scale_divisor = factor ** dim
if channels % scale_divisor != 0:
raise ValueError(
f"Number of input channels ({channels}) must be evenly "
f"divisible by scale_factor ** dimensions ({factor}**{dim}={scale_divisor})."
)
org_channels = channels // scale_divisor
output_size = [batch_size, org_channels] + [d * factor for d in input_size[2:]]
indices = tuple(range(2, 2 + 2 * dim))
indices_factor, indices_dim = indices[:dim], indices[dim:]
permute_indices = (0, 1) + sum(zip(indices_dim, indices_factor), ())
x = x.reshape(batch_size, org_channels, *([factor] * dim + input_size[2:]))
x = x.permute(permute_indices).reshape(output_size)
return x
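# Shape check (editor's example): a 2x shuffle trades channels for resolution.
#   x = torch.rand(1, 8, 4, 4)
#   pixelshuffle(x, spatial_dims=2, scale_factor=2).shape  # torch.Size([1, 2, 8, 8])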
@contextmanager
def eval_mode(*nets: nn.Module):
"""
Set network(s) to eval mode and then return to original state at the end.
Args:
nets: Input network(s)
Examples
.. code-block:: python
t=torch.rand(1,1,16,16)
p=torch.nn.Conv2d(1,1,3)
print(p.training) # True
with eval_mode(p):
print(p.training) # False
print(p(t).sum().backward()) # will correctly raise an exception as gradients are calculated
"""
# Get original state of network(s)
training = [n for n in nets if n.training]
try:
# set to eval mode
with torch.no_grad():
yield [n.eval() for n in nets]
finally:
# Return required networks to training
for n in training:
n.train()
@contextmanager
def train_mode(*nets: nn.Module):
"""
Set network(s) to train mode and then return to original state at the end.
Args:
nets: Input network(s)
Examples
.. code-block:: python
t=torch.rand(1,1,16,16)
p=torch.nn.Conv2d(1,1,3)
p.eval()
print(p.training) # False
with train_mode(p):
print(p.training) # True
print(p(t).sum().backward()) # No exception
"""
# Get original state of network(s)
eval_list = [n for n in nets if not n.training]
try:
# set to train mode
with torch.set_grad_enabled(True):
yield [n.train() for n in nets]
finally:
# Return required networks to eval_list
for n in eval_list:
n.eval()
def copy_model_state(
dst: Union[torch.nn.Module, Mapping],
src: Union[torch.nn.Module, Mapping],
dst_prefix="",
mapping=None,
exclude_vars=None,
inplace=True,
):
"""
Compute a module state_dict, of which the keys are the same as `dst`. The values of `dst` are overwritten
by the ones from `src` whenever their keys match. The method provides additional `dst_prefix` for
the `dst` key when matching them. `mapping` can be a `{"src_key": "dst_key"}` dict, indicating
`dst[dst_prefix + dst_key] = src[src_key]`.
This function is mainly to return a model state dict
for loading the `src` model state into the `dst` model, `src` and `dst` can have different dict keys, but
their corresponding values normally have the same shape.
Args:
dst: a pytorch module or state dict to be updated.
        src: a pytorch module or state dict used to get the values used for the update.
dst_prefix: `dst` key prefix, so that `dst[dst_prefix + src_key]`
will be assigned to the value of `src[src_key]`.
mapping: a `{"src_key": "dst_key"}` dict, indicating that `dst[dst_prefix + dst_key]`
to be assigned to the value of `src[src_key]`.
exclude_vars: a regular expression to match the `dst` variable names,
so that their values are not overwritten by `src`.
inplace: whether to set the `dst` module with the updated `state_dict` via `load_state_dict`.
This option is only available when `dst` is a `torch.nn.Module`.
Examples:
.. code-block:: python
from monai.networks.nets import BasicUNet
from monai.networks.utils import copy_model_state
model_a = BasicUNet(in_channels=1, out_channels=4)
model_b = BasicUNet(in_channels=1, out_channels=2)
model_a_b, changed, unchanged = copy_model_state(
model_a, model_b, exclude_vars="conv_0.conv_0", inplace=False)
# dst model updated: 76 of 82 variables.
model_a.load_state_dict(model_a_b)
# <All keys matched successfully>
Returns: an OrderedDict of the updated `dst` state, the changed, and unchanged keys.
"""
if isinstance(src, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
src = src.module
if isinstance(dst, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
dst = dst.module
src_dict = src.state_dict() if isinstance(src, torch.nn.Module) else src
dst_dict = dst.state_dict() if isinstance(dst, torch.nn.Module) else dst
dst_dict = OrderedDict(dst_dict)
to_skip = {s_key for s_key in src_dict if exclude_vars and re.compile(exclude_vars).search(s_key)}
# update dst with items from src
all_keys, updated_keys = list(dst_dict), list()
for s, val in src_dict.items():
dst_key = f"{dst_prefix}{s}"
if dst_key in dst_dict and dst_key not in to_skip and dst_dict[dst_key].shape == val.shape:
dst_dict[dst_key] = val
updated_keys.append(dst_key)
for s in mapping if mapping else {}:
dst_key = f"{dst_prefix}{mapping[s]}"
if dst_key in dst_dict and dst_key not in to_skip:
if dst_dict[dst_key].shape != src_dict[s].shape:
warnings.warn(f"Param. shape changed from {dst_dict[dst_key].shape} to {src_dict[s].shape}.")
dst_dict[dst_key] = src_dict[s]
updated_keys.append(dst_key)
updated_keys = sorted(set(updated_keys))
unchanged_keys = sorted(set(all_keys).difference(updated_keys))
print(f"'dst' model updated: {len(updated_keys)} of {len(dst_dict)} variables.")
if inplace and isinstance(dst, torch.nn.Module):
dst.load_state_dict(dst_dict)
return dst_dict, updated_keys, unchanged_keys
def convert_to_torchscript(
model: nn.Module,
filename_or_obj: Optional[Any] = None,
extra_files: Optional[Dict] = None,
verify: bool = False,
inputs: Optional[Sequence[Any]] = None,
device: Optional[torch.device] = None,
rtol: float = 1e-4,
atol: float = 0.0,
**kwargs,
):
"""
Utility to convert a model into TorchScript model and save to file,
with optional input / output data verification.
Args:
model: source PyTorch model to save.
filename_or_obj: if not None, specify a file-like object (has to implement write and flush)
or a string containing a file path name to save the TorchScript model.
extra_files: map from filename to contents which will be stored as part of the save model file.
works for PyTorch 1.7 or later.
for more details: https://pytorch.org/docs/stable/generated/torch.jit.save.html.
verify: whether to verify the input and output of TorchScript model.
if `filename_or_obj` is not None, load the saved TorchScript model and verify.
        inputs: input test data to verify model, should be a sequence of data, every item maps to an argument
            of the `model()` function.
device: target device to verify the model, if None, use CUDA if available.
rtol: the relative tolerance when comparing the outputs of PyTorch model and TorchScript model.
atol: the absolute tolerance when comparing the outputs of PyTorch model and TorchScript model.
kwargs: other arguments except `obj` for `torch.jit.script()` to convert model, for more details:
https://pytorch.org/docs/master/generated/torch.jit.script.html.
"""
model.eval()
with torch.no_grad():
script_module = torch.jit.script(model, **kwargs)
if filename_or_obj is not None:
if not pytorch_after(1, 7):
torch.jit.save(m=script_module, f=filename_or_obj)
else:
torch.jit.save(m=script_module, f=filename_or_obj, _extra_files=extra_files)
if verify:
if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if inputs is None:
raise ValueError("missing input data for verification.")
inputs = [i.to(device) if isinstance(i, torch.Tensor) else i for i in inputs]
ts_model = torch.jit.load(filename_or_obj) if filename_or_obj is not None else script_module
ts_model.eval().to(device)
model = model.to(device)
with torch.no_grad():
set_determinism(seed=0)
torch_out = ensure_tuple(model(*inputs))
set_determinism(seed=0)
torchscript_out = ensure_tuple(ts_model(*inputs))
set_determinism(seed=None)
# compare TorchScript and PyTorch results
for r1, r2 in zip(torch_out, torchscript_out):
if isinstance(r1, torch.Tensor) or isinstance(r2, torch.Tensor):
torch.testing.assert_allclose(r1, r2, rtol=rtol, atol=atol)
return script_module
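# Illustrative usage (editor's sketch; the model and file name are placeholders):
#   net = torch.nn.Conv2d(1, 1, 3)
#   script = convert_to_torchscript(
#       net, "net_ts.pt", verify=True, inputs=[torch.rand(1, 1, 16, 16)]
#   )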
| [
"torch.zeros",
"torch.nn.init.constant_",
"torch.no_grad",
"torch.inverse",
"torch.ones",
"torch.jit.load",
"torch.jit.save",
"torch.tensor",
"torch.testing.assert_allclose",
"torch.cuda.is_available",
"torch.jit.script",
"torch.set_grad_enabled",
"torch.reshape"
] | 1.6 | yiheng-wang-nv/MONAI | 885d5b947aeafc1a9bee2899cfd48fff9036e68a |
1.9 | #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for multiple choice.
"""
# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional, Union
import datasets
import numpy as np
import torch
import transformers
from datasets import load_dataset
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.fx import symbolic_trace
import yaml
from optimum.intel.neural_compressor import (
IncOptimizer,
IncPruner,
IncPruningConfig,
IncQuantizationConfig,
IncQuantizationMode,
IncQuantizer,
IncTrainer,
)
from optimum.intel.neural_compressor.quantization import IncQuantizedModelForMultipleChoice
from optimum.intel.neural_compressor.utils import CONFIG_NAME
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.12.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class OptimizationArguments:
"""
Arguments pertaining to what type of optimization we are going to apply on the model.
"""
quantize: bool = field(
default=False,
metadata={"help": "Whether or not to apply quantization."},
)
quantization_approach: Optional[str] = field(
default=None,
metadata={"help": "Quantization approach. Supported approach are static, dynamic and aware_training."},
)
prune: bool = field(
default=False,
metadata={"help": "Whether or not to apply pruning."},
)
target_sparsity: Optional[float] = field(
default=None,
metadata={"help": "Targeted sparsity when pruning the model."},
)
quantization_config: Optional[str] = field(
default=None,
metadata={
"help": "Path to the directory containing the YAML configuration file used to control the quantization and "
"tuning behavior."
},
)
pruning_config: Optional[str] = field(
default=None,
metadata={
"help": "Path to the directory containing the YAML configuration file used to control the pruning behavior."
},
)
tune_metric: str = field(
default="eval_accuracy",
metadata={"help": "Metric used for the tuning strategy."},
)
perf_tol: Optional[float] = field(
default=None,
metadata={"help": "Performance tolerance when optimizing the model."},
)
verify_loading: bool = field(
default=False,
metadata={"help": "Whether or not to verify the loading of the quantized model."},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
def __post_init__(self):
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
"""
Data collator that will dynamically pad the inputs for multiple choice received.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
tokenizer: PreTrainedTokenizerBase
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
def __call__(self, features):
label_name = "label" if "label" in features[0].keys() else "labels"
labels = [feature.pop(label_name) for feature in features]
batch_size = len(features)
num_choices = len(features[0]["input_ids"])
flattened_features = [
[{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
]
flattened_features = sum(flattened_features, [])
batch = self.tokenizer.pad(
flattened_features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
# Un-flatten
batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
# Add back labels
batch["labels"] = torch.tensor(labels, dtype=torch.int64)
return batch
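# Shape sketch (editor's note, not part of the original script): each feature holds
# `num_choices` tokenized candidates; the collator flattens to batch_size * num_choices
# rows, pads once, then reshapes every field back.
#   collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
#   batch = collator(features)
#   batch["input_ids"].shape  # (batch_size, num_choices, seq_len)
#   batch["labels"].shape     # (batch_size,)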
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args, optim_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args, optim_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset("swag", "regular", cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(examples):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
]
# Flatten out
first_sentences = sum(first_sentences, [])
second_sentences = sum(second_sentences, [])
# Tokenize
tokenized_examples = tokenizer(
first_sentences,
second_sentences,
truncation=True,
max_length=max_seq_length,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
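    # Flatten/un-flatten sketch (editor's note): with 2 examples and 4 endings, the
    # tokenizer sees 8 (context, ending) pairs; the trailing dict comprehension then
    # regroups every 4 consecutive rows, so each field maps back to one example.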
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Metric
def compute_metrics(eval_predictions):
predictions, label_ids = eval_predictions
preds = np.argmax(predictions, axis=1)
return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
trainer = IncTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
eval_dataloader = trainer.get_eval_dataloader()
it = iter(eval_dataloader)
try:
input_names = next(it).keys()
except StopIteration:
input_names = None
logger.warning(
"Unable to determine the names of the inputs of the model to trace, input_names is set to None and "
"model.dummy_inputs().keys() will be used instead."
)
resume_from_checkpoint = training_args.resume_from_checkpoint
metric_name = optim_args.tune_metric
def take_eval_steps(model, trainer, metric_name, save_metrics=False):
trainer.model = model
metrics = trainer.evaluate()
if save_metrics:
trainer.save_metrics("eval", metrics)
logger.info("{}: {}".format(metric_name, metrics.get(metric_name)))
logger.info("Throughput: {} samples/sec".format(metrics.get("eval_samples_per_second")))
return metrics.get(metric_name)
def eval_func(model):
return take_eval_steps(model, trainer, metric_name)
def take_train_steps(model, trainer, resume_from_checkpoint, last_checkpoint):
trainer.model_wrapped = model
trainer.model = model
checkpoint = None
if resume_from_checkpoint is not None:
checkpoint = resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(pruner, resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
def train_func(model):
return take_train_steps(model, trainer, resume_from_checkpoint, last_checkpoint)
quantizer = None
pruner = None
num_choices = len(eval_dataset[0]["input_ids"])
if not optim_args.quantize and not optim_args.prune:
raise ValueError("quantize and prune are both set to False.")
result_baseline_model = take_eval_steps(model, trainer, metric_name)
default_config = os.path.join(os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir)), "config")
if optim_args.quantize:
if not training_args.do_eval:
raise ValueError("do_eval must be set to True for quantization.")
q8_config = IncQuantizationConfig.from_pretrained(
optim_args.quantization_config if optim_args.quantization_config is not None else default_config,
config_file_name="quantization.yml",
cache_dir=model_args.cache_dir,
)
# Set metric tolerance if specified
if optim_args.perf_tol is not None:
q8_config.set_tolerance(optim_args.perf_tol)
# Set quantization approach if specified
if optim_args.quantization_approach is not None:
supported_approach = {"static", "dynamic", "aware_training"}
if optim_args.quantization_approach not in supported_approach:
raise ValueError(
"Unknown quantization approach. Supported approach are " + ", ".join(supported_approach)
)
quant_approach = getattr(IncQuantizationMode, optim_args.quantization_approach.upper()).value
q8_config.set_config("quantization.approach", quant_approach)
# torch FX used for post-training quantization and quantization aware training
# dynamic quantization will be added when torch FX is more mature
if q8_config.get_config("quantization.approach") != IncQuantizationMode.DYNAMIC.value:
if not training_args.do_train:
raise ValueError("do_train must be set to True for static and aware training quantization.")
# TODO : Remove when dynamic axes support
if (
not training_args.dataloader_drop_last
and eval_dataset.shape[0] % training_args.per_device_eval_batch_size != 0
):
raise ValueError(
"The number of samples of the dataset is not a multiple of the batch size."
"Use --dataloader_drop_last to overcome."
)
if not data_args.pad_to_max_length:
raise ValueError(
"All the samples must have the same sequence length, use --pad_to_max_length to overcome."
)
q8_config.set_config("model.framework", "pytorch_fx")
model.config.save_pretrained(training_args.output_dir)
model = symbolic_trace(
model,
input_names=input_names,
batch_size=training_args.per_device_eval_batch_size,
sequence_length=max_seq_length,
num_choices=num_choices,
)
calib_dataloader = trainer.get_train_dataloader()
inc_quantizer = IncQuantizer(
model, q8_config, eval_func=eval_func, train_func=train_func, calib_dataloader=calib_dataloader
)
quantizer = inc_quantizer.fit()
if optim_args.prune:
if not training_args.do_train:
raise ValueError("do_train must be set to True for pruning.")
pruning_config = IncPruningConfig.from_pretrained(
optim_args.pruning_config if optim_args.pruning_config is not None else default_config,
config_file_name="prune.yml",
cache_dir=model_args.cache_dir,
)
# Set targeted sparsity if specified
if optim_args.target_sparsity is not None:
pruning_config.set_config(
"pruning.approach.weight_compression.target_sparsity", optim_args.target_sparsity
)
pruning_start_epoch = pruning_config.get_config("pruning.approach.weight_compression.start_epoch")
pruning_end_epoch = pruning_config.get_config("pruning.approach.weight_compression.end_epoch")
if pruning_start_epoch > training_args.num_train_epochs - 1:
logger.warning(
f"Pruning end epoch {pruning_start_epoch} is higher than the total number of training epoch "
f"{training_args.num_train_epochs}. No pruning will be applied."
)
if pruning_end_epoch > training_args.num_train_epochs - 1:
logger.warning(
f"Pruning end epoch {pruning_end_epoch} is higher than the total number of training epoch "
f"{training_args.num_train_epochs}. The target sparsity will not be reached."
)
inc_pruner = IncPruner(model, pruning_config, eval_func=eval_func, train_func=train_func)
        # Create the pruning object used in the IncTrainer training loop
pruner = inc_pruner.fit()
inc_optimizer = IncOptimizer(model, quantizer=quantizer, pruner=pruner)
opt_model = inc_optimizer.fit()
_, sparsity = opt_model.report_sparsity()
result_opt_model = take_eval_steps(opt_model.model, trainer, metric_name, save_metrics=True)
trainer.save_model(training_args.output_dir)
with open(os.path.join(training_args.output_dir, CONFIG_NAME), "w") as f:
yaml.dump(opt_model.tune_cfg, f, default_flow_style=False)
logger.info(
f"Optimized model with final sparsity of {sparsity} and {metric_name} of {result_opt_model} saved to: "
f"{training_args.output_dir}. Original model had an {metric_name} of {result_baseline_model}"
)
if optim_args.quantize and optim_args.verify_loading:
# Load the model obtained after Intel Neural Compressor (INC) quantization
loaded_model = IncQuantizedModelForMultipleChoice.from_pretrained(
training_args.output_dir,
input_names=input_names,
batch_size=training_args.per_device_eval_batch_size,
sequence_length=max_seq_length,
num_choices=num_choices,
)
loaded_model.eval()
result_loaded_model = take_eval_steps(loaded_model, trainer, metric_name)
if result_loaded_model != result_opt_model:
raise ValueError("The quantized model was not successfully loaded.")
else:
logger.info(f"The quantized model was successfully loaded.")
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| [
"torch.tensor"
] | 1.9 | michaelbenayoun/optimum | 21c5809577e2ef5687f293d31d1d3e28288e1bb7 |
1.3 | """
Entry point for training and evaluating a dependency parser.
This implementation combines a deep biaffine graph-based parser with linearization and distance features.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
"""
Training and evaluation for the parser.
"""
import sys
import os
import shutil
import time
from datetime import datetime
import argparse
import numpy as np
import random
import torch
from torch import nn, optim
from stanza.models.depparse.data import DataLoader
from stanza.models.depparse.trainer import Trainer
from stanza.models.depparse import scorer
from stanza.models.common import utils
from stanza.models.common.pretrain import Pretrain
from stanza.models.common.doc import *
from stanza.utils.conll import CoNLL
from stanza.models import _training_logging
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/depparse', help='Root dir for saving models.')
parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors.')
parser.add_argument('--wordvec_file', type=str, default=None, help='Word vectors filename.')
parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
    parser.add_argument('--gold_file', type=str, default=None, help='Gold CoNLL-U file.')
parser.add_argument('--mode', default='train', choices=['train', 'predict'])
parser.add_argument('--lang', type=str, help='Language')
parser.add_argument('--shorthand', type=str, help="Treebank shorthand")
parser.add_argument('--hidden_dim', type=int, default=400)
parser.add_argument('--char_hidden_dim', type=int, default=400)
parser.add_argument('--deep_biaff_hidden_dim', type=int, default=400)
parser.add_argument('--composite_deep_biaff_hidden_dim', type=int, default=100)
parser.add_argument('--word_emb_dim', type=int, default=75)
parser.add_argument('--char_emb_dim', type=int, default=100)
parser.add_argument('--tag_emb_dim', type=int, default=50)
parser.add_argument('--transformed_dim', type=int, default=125)
parser.add_argument('--num_layers', type=int, default=3)
parser.add_argument('--char_num_layers', type=int, default=1)
parser.add_argument('--pretrain_max_vocab', type=int, default=250000)
parser.add_argument('--word_dropout', type=float, default=0.33)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--rec_dropout', type=float, default=0, help="Recurrent dropout")
parser.add_argument('--char_rec_dropout', type=float, default=0, help="Recurrent dropout")
parser.add_argument('--no_char', dest='char', action='store_false', help="Turn off character model.")
parser.add_argument('--no_pretrain', dest='pretrain', action='store_false', help="Turn off pretrained embeddings.")
parser.add_argument('--no_linearization', dest='linearization', action='store_false', help="Turn off linearization term.")
parser.add_argument('--no_distance', dest='distance', action='store_false', help="Turn off distance term.")
parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--lr', type=float, default=3e-3, help='Learning rate')
parser.add_argument('--beta2', type=float, default=0.95)
parser.add_argument('--max_steps', type=int, default=50000)
parser.add_argument('--eval_interval', type=int, default=100)
parser.add_argument('--max_steps_before_stop', type=int, default=3000)
parser.add_argument('--batch_size', type=int, default=5000)
parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--save_dir', type=str, default='saved_models/depparse', help='Root dir for saving models.')
parser.add_argument('--save_name', type=str, default=None, help="File name to save the model")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args()
return args
def main():
args = parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
args = vars(args)
print("Running parser in {} mode".format(args['mode']))
if args['mode'] == 'train':
train(args)
else:
evaluate(args)
def train(args):
utils.ensure_dir(args['save_dir'])
model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])
# load pretrained vectors if needed
pretrain = None
if args['pretrain']:
vec_file = args['wordvec_file'] if args['wordvec_file'] else utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])
pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
pretrain = Pretrain(pretrain_file, vec_file, args['pretrain_max_vocab'])
# load data
print("Loading data with batch size {}...".format(args['batch_size']))
train_doc = Document(CoNLL.conll2dict(input_file=args['train_file']))
train_batch = DataLoader(train_doc, args['batch_size'], args, pretrain, evaluation=False)
vocab = train_batch.vocab
dev_doc = Document(CoNLL.conll2dict(input_file=args['eval_file']))
dev_batch = DataLoader(dev_doc, args['batch_size'], args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
# pred and gold path
system_pred_file = args['output_file']
gold_file = args['gold_file']
# skip training if the language does not have training or dev data
if len(train_batch) == 0 or len(dev_batch) == 0:
print("Skip training because no data available...")
sys.exit(0)
print("Training parser...")
trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])
global_step = 0
max_steps = args['max_steps']
dev_score_history = []
best_dev_preds = []
current_lr = args['lr']
global_start_time = time.time()
format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
using_amsgrad = False
last_best_step = 0
# start training
train_loss = 0
while True:
do_break = False
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss = trainer.update(batch, eval=False) # update step
train_loss += loss
if global_step % args['log_step'] == 0:
duration = time.time() - start_time
print(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
max_steps, loss, duration, current_lr))
if global_step % args['eval_interval'] == 0:
# eval on dev
print("Evaluating on dev set...")
dev_preds = []
for batch in dev_batch:
preds = trainer.predict(batch)
dev_preds += preds
dev_preds = utils.unsort(dev_preds, dev_batch.data_orig_idx)
dev_batch.doc.set([HEAD, DEPREL], [y for x in dev_preds for y in x])
CoNLL.dict2conll(dev_batch.doc.to_dict(), system_pred_file)
_, _, dev_score = scorer.score(system_pred_file, gold_file)
train_loss = train_loss / args['eval_interval'] # avg loss per batch
print("step {}: train_loss = {:.6f}, dev_score = {:.4f}".format(global_step, train_loss, dev_score))
train_loss = 0
# save best model
if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
last_best_step = global_step
trainer.save(model_file)
print("new best model saved.")
best_dev_preds = dev_preds
dev_score_history += [dev_score]
print("")
if global_step - last_best_step >= args['max_steps_before_stop']:
if not using_amsgrad:
print("Switching to AMSGrad")
last_best_step = global_step
using_amsgrad = True
trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)
else:
do_break = True
break
if global_step >= args['max_steps']:
do_break = True
break
if do_break: break
train_batch.reshuffle()
print("Training ended with {} steps.".format(global_step))
best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1
print("Best dev F1 = {:.2f}, at iteration = {}".format(best_f, best_eval * args['eval_interval']))
def evaluate(args):
# file paths
system_pred_file = args['output_file']
gold_file = args['gold_file']
model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])
# load pretrain; note that we allow the pretrain_file to be non-existent
pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
pretrain = Pretrain(pretrain_file)
# load model
print("Loading model from: {}".format(model_file))
use_cuda = args['cuda'] and not args['cpu']
trainer = Trainer(pretrain=pretrain, model_file=model_file, use_cuda=use_cuda)
loaded_args, vocab = trainer.args, trainer.vocab
# load config
for k in args:
if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':
loaded_args[k] = args[k]
# load data
print("Loading data with batch size {}...".format(args['batch_size']))
doc = Document(CoNLL.conll2dict(input_file=args['eval_file']))
batch = DataLoader(doc, args['batch_size'], loaded_args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
if len(batch) > 0:
print("Start evaluation...")
preds = []
for i, b in enumerate(batch):
preds += trainer.predict(b)
else:
# skip eval if dev data does not exist
preds = []
preds = utils.unsort(preds, batch.data_orig_idx)
# write to file and score
batch.doc.set([HEAD, DEPREL], [y for x in preds for y in x])
CoNLL.dict2conll(batch.doc.to_dict(), system_pred_file)
if gold_file is not None:
_, _, score = scorer.score(system_pred_file, gold_file)
print("Parser score:")
print("{} {:.2f}".format(args['shorthand'], score*100))
if __name__ == '__main__':
main()
| [
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.cuda.is_available"
] | 1.3.0 | de9uch1/stanza | cafb7d5004842cd3c8a3ac334ce7649bac928830 |
1.3 | """
A trainer class to handle training and testing of models.
"""
import sys
import numpy as np
from collections import Counter
import logging
import torch
from torch import nn
import torch.nn.init as init
import stanza.models.common.seq2seq_constant as constant
from stanza.models.common.seq2seq_model import Seq2SeqModel
from stanza.models.common import utils, loss
from stanza.models.lemma import edit
from stanza.models.lemma.vocab import MultiVocab
logger = logging.getLogger('stanza')
def unpack_batch(batch, use_cuda):
""" Unpack a batch from the data loader. """
if use_cuda:
inputs = [b.cuda() if b is not None else None for b in batch[:6]]
else:
inputs = [b if b is not None else None for b in batch[:6]]
orig_idx = batch[6]
return inputs, orig_idx
class Trainer(object):
""" A trainer for training models. """
def __init__(self, args=None, vocab=None, emb_matrix=None, model_file=None, use_cuda=False):
self.use_cuda = use_cuda
if model_file is not None:
# load everything from file
self.load(model_file, use_cuda)
else:
# build model from scratch
self.args = args
self.model = None if args['dict_only'] else Seq2SeqModel(args, emb_matrix=emb_matrix, use_cuda=use_cuda)
self.vocab = vocab
# dict-based components
self.word_dict = dict()
self.composite_dict = dict()
if not self.args['dict_only']:
if self.args.get('edit', False):
self.crit = loss.MixLoss(self.vocab['char'].size, self.args['alpha'])
logger.debug("Running seq2seq lemmatizer with edit classifier...")
else:
self.crit = loss.SequenceLoss(self.vocab['char'].size)
self.parameters = [p for p in self.model.parameters() if p.requires_grad]
if use_cuda:
self.model.cuda()
self.crit.cuda()
else:
self.model.cpu()
self.crit.cpu()
self.optimizer = utils.get_optimizer(self.args['optim'], self.parameters, self.args['lr'])
def update(self, batch, eval=False):
inputs, orig_idx = unpack_batch(batch, self.use_cuda)
src, src_mask, tgt_in, tgt_out, pos, edits = inputs
if eval:
self.model.eval()
else:
self.model.train()
self.optimizer.zero_grad()
log_probs, edit_logits = self.model(src, src_mask, tgt_in, pos)
if self.args.get('edit', False):
assert edit_logits is not None
loss = self.crit(log_probs.view(-1, self.vocab['char'].size), tgt_out.view(-1), \
edit_logits, edits)
else:
loss = self.crit(log_probs.view(-1, self.vocab['char'].size), tgt_out.view(-1))
loss_val = loss.data.item()
if eval:
return loss_val
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args['max_grad_norm'])
self.optimizer.step()
return loss_val
def predict(self, batch, beam_size=1):
inputs, orig_idx = unpack_batch(batch, self.use_cuda)
src, src_mask, tgt, tgt_mask, pos, edits = inputs
self.model.eval()
batch_size = src.size(0)
preds, edit_logits = self.model.predict(src, src_mask, pos=pos, beam_size=beam_size)
pred_seqs = [self.vocab['char'].unmap(ids) for ids in preds] # unmap to tokens
pred_seqs = utils.prune_decoded_seqs(pred_seqs)
pred_tokens = ["".join(seq) for seq in pred_seqs] # join chars to be tokens
pred_tokens = utils.unsort(pred_tokens, orig_idx)
if self.args.get('edit', False):
assert edit_logits is not None
edits = np.argmax(edit_logits.data.cpu().numpy(), axis=1).reshape([batch_size]).tolist()
edits = utils.unsort(edits, orig_idx)
else:
edits = None
return pred_tokens, edits
def postprocess(self, words, preds, edits=None):
""" Postprocess, mainly for handing edits. """
assert len(words) == len(preds), "Lemma predictions must have same length as words."
edited = []
if self.args.get('edit', False):
assert edits is not None and len(words) == len(edits)
for w, p, e in zip(words, preds, edits):
lem = edit.edit_word(w, p, e)
edited += [lem]
else:
edited = preds # do not edit
# final sanity check
assert len(edited) == len(words)
final = []
for lem, w in zip(edited, words):
if len(lem) == 0 or constant.UNK in lem:
final += [w] # invalid prediction, fall back to word
else:
final += [lem]
return final
def update_lr(self, new_lr):
utils.change_lr(self.optimizer, new_lr)
def train_dict(self, triples):
""" Train a dict lemmatizer given training (word, pos, lemma) triples. """
# accumulate counter
ctr = Counter()
ctr.update([(p[0], p[1], p[2]) for p in triples])
# find the most frequent mappings
for p, _ in ctr.most_common():
w, pos, l = p
if (w,pos) not in self.composite_dict:
self.composite_dict[(w,pos)] = l
if w not in self.word_dict:
self.word_dict[w] = l
return
def predict_dict(self, pairs):
""" Predict a list of lemmas using the dict model given (word, pos) pairs. """
lemmas = []
for p in pairs:
w, pos = p
if (w,pos) in self.composite_dict:
lemmas += [self.composite_dict[(w,pos)]]
elif w in self.word_dict:
lemmas += [self.word_dict[w]]
else:
lemmas += [w]
return lemmas
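    # Lookup-order sketch (editor's example): the (word, pos) entry wins over the
    # word-only entry, which wins over the identity fallback.
    #   trainer.train_dict([("ran", "VERB", "run")])
    #   trainer.predict_dict([("ran", "VERB"), ("ran", "NOUN"), ("cats", "NOUN")])
    #   # -> ["run", "run", "cats"]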
def skip_seq2seq(self, pairs):
""" Determine if we can skip the seq2seq module when ensembling with the frequency lexicon. """
skip = []
for p in pairs:
w, pos = p
if (w,pos) in self.composite_dict:
skip.append(True)
elif w in self.word_dict:
skip.append(True)
else:
skip.append(False)
return skip
def ensemble(self, pairs, other_preds):
""" Ensemble the dict with statistical model predictions. """
lemmas = []
assert len(pairs) == len(other_preds)
for p, pred in zip(pairs, other_preds):
w, pos = p
if (w,pos) in self.composite_dict:
lemma = self.composite_dict[(w,pos)]
elif w in self.word_dict:
lemma = self.word_dict[w]
else:
lemma = pred
if lemma is None:
lemma = w
lemmas.append(lemma)
return lemmas
def save(self, filename):
params = {
'model': self.model.state_dict() if self.model is not None else None,
'dicts': (self.word_dict, self.composite_dict),
'vocab': self.vocab.state_dict(),
'config': self.args
}
try:
torch.save(params, filename)
logger.info("Model saved to {}".format(filename))
except BaseException:
logger.warning("Saving failed... continuing anyway.")
def load(self, filename, use_cuda=False):
try:
checkpoint = torch.load(filename, lambda storage, loc: storage)
except BaseException:
logger.error("Cannot load model from {}".format(filename))
raise
self.args = checkpoint['config']
self.word_dict, self.composite_dict = checkpoint['dicts']
if not self.args['dict_only']:
self.model = Seq2SeqModel(self.args, use_cuda=use_cuda)
self.model.load_state_dict(checkpoint['model'])
else:
self.model = None
self.vocab = MultiVocab.load_state_dict(checkpoint['vocab'])
| [
"torch.save",
"torch.load"
] | 1.3.0 | de9uch1/stanza | cafb7d5004842cd3c8a3ac334ce7649bac928830 |
1.6 | import torch
import torch.nn as nn
from torch.nn.functional import mse_loss
class PSNRLoss(nn.Module):
r"""Creates a criterion that calculates the PSNR between 2 images. Given an m x n image, the PSNR is:
.. math::
\text{PSNR} = 10 \log_{10} \bigg(\frac{\text{MAX}_I^2}{MSE(I,T)}\bigg)
where
.. math::
\text{MSE}(I,T) = \frac{1}{mn}\sum_{i=0}^{m-1}\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2
and :math:`\text{MAX}_I` is the maximum possible input value
(e.g for floating point images :math:`\text{MAX}_I=1`).
Arguments:
max_val (float): Maximum value of input
Shape:
- input: :math:`(*)`
- approximation: :math:`(*)` same shape as input
- output: :math:`()` a scalar
Examples:
>>> kornia.losses.psnr_loss(torch.ones(1), 1.2*torch.ones(1), 2)
tensor(20.0000) # 10 * log(4/((1.2-1)**2)) / log(10)
Reference:
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
"""
def __init__(self, max_val: float) -> None:
super(PSNRLoss, self).__init__()
self.max_val: float = max_val
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: # type: ignore
return psnr_loss(input, target, self.max_val)
def psnr_loss(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:
r"""Function that computes PSNR
See :class:`~kornia.losses.PSNRLoss` for details.
"""
if not torch.is_tensor(input) or not torch.is_tensor(target):
raise TypeError(f"Expected 2 torch tensors but got {type(input)} and {type(target)}")
if input.shape != target.shape:
raise TypeError(f"Expected tensors of equal shapes, but got {input.shape} and {target.shape}")
mse_val = mse_loss(input, target, reduction='mean')
max_val_tensor: torch.Tensor = torch.tensor(max_val).to(input.device).to(input.dtype)
return 10 * torch.log10(max_val_tensor * max_val_tensor / mse_val)
| [
"torch.nn.functional.mse_loss",
"torch.is_tensor",
"torch.tensor",
"torch.log10"
] | 1.6.0 | pmeier/kornia | 57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf |
1.6 | from typing import Tuple, List, Union, cast
import torch
import torch.nn as nn
from kornia.geometry.transform.affwarp import rotate
def normalize_kernel2d(input: torch.Tensor) -> torch.Tensor:
r"""Normalizes both derivative and smoothing kernel.
"""
if len(input.size()) < 2:
raise TypeError("input should be at least 2D tensor. Got {}"
.format(input.size()))
norm: torch.Tensor = input.abs().sum(dim=-1).sum(dim=-1)
return input / (norm.unsqueeze(-1).unsqueeze(-1))
def gaussian(window_size, sigma):
x = torch.arange(window_size).float() - window_size // 2
if window_size % 2 == 0:
x = x + 0.5
gauss = torch.exp((-x.pow(2.0) / float(2 * sigma ** 2)))
return gauss / gauss.sum()
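# Illustrative sketch (not part of the original module): the window above is
# normalized to sum to 1, so Gaussian blurring preserves overall brightness.
def _gaussian_window_example() -> None:
    assert torch.isclose(gaussian(5, 1.5).sum(), torch.tensor(1.0))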
def laplacian_1d(window_size) -> torch.Tensor:
r"""One could also use the Laplacian of Gaussian formula
to design the filter.
"""
filter_1d = torch.ones(window_size)
filter_1d[window_size // 2] = 1 - window_size
laplacian_1d: torch.Tensor = filter_1d
return laplacian_1d
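# Illustrative sketch (not part of the original module): the 3-tap case
# recovers the classic [1, -2, 1] second-derivative stencil.
def _laplacian_1d_example() -> None:
    assert torch.equal(laplacian_1d(3), torch.tensor([1., -2., 1.]))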
def get_box_kernel2d(kernel_size: Tuple[int, int]) -> torch.Tensor:
r"""Utility function that returns a box filter."""
kx: float = float(kernel_size[0])
ky: float = float(kernel_size[1])
scale: torch.Tensor = torch.tensor(1.) / torch.tensor([kx * ky])
tmp_kernel: torch.Tensor = torch.ones(1, kernel_size[0], kernel_size[1])
return scale.to(tmp_kernel.dtype) * tmp_kernel
def get_binary_kernel2d(window_size: Tuple[int, int]) -> torch.Tensor:
r"""Creates a binary kernel to extract the patches. If the window size
is HxW will create a (H*W)xHxW kernel.
"""
window_range: int = window_size[0] * window_size[1]
kernel: torch.Tensor = torch.zeros(window_range, window_range)
for i in range(window_range):
kernel[i, i] += 1.0
return kernel.view(window_range, 1, window_size[0], window_size[1])
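# Illustrative sketch (not part of the original module): each slice of the
# binary kernel selects exactly one pixel of the window, so per-slice sums are 1.
def _binary_kernel_example() -> None:
    k = get_binary_kernel2d((2, 2))
    assert k.shape == (4, 1, 2, 2)
    assert torch.equal(k.sum(dim=(2, 3)).squeeze(), torch.ones(4))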
def get_sobel_kernel_3x3() -> torch.Tensor:
"""Utility function that returns a sobel kernel of 3x3"""
return torch.tensor([
[-1., 0., 1.],
[-2., 0., 2.],
[-1., 0., 1.],
])
def get_sobel_kernel_5x5_2nd_order() -> torch.Tensor:
"""Utility function that returns a 2nd order sobel kernel of 5x5"""
return torch.tensor([
[-1., 0., 2., 0., -1.],
[-4., 0., 8., 0., -4.],
[-6., 0., 12., 0., -6.],
[-4., 0., 8., 0., -4.],
[-1., 0., 2., 0., -1.]
])
def _get_sobel_kernel_5x5_2nd_order_xy() -> torch.Tensor:
"""Utility function that returns a 2nd order sobel kernel of 5x5"""
return torch.tensor([
[-1., -2., 0., 2., 1.],
[-2., -4., 0., 4., 2.],
[0., 0., 0., 0., 0.],
[2., 4., 0., -4., -2.],
[1., 2., 0., -2., -1.]
])
def get_diff_kernel_3x3() -> torch.Tensor:
"""Utility function that returns a sobel kernel of 3x3"""
return torch.tensor([
[-0., 0., 0.],
[-1., 0., 1.],
[-0., 0., 0.],
])
def get_diff_kernel3d(device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:
"""Utility function that returns a first order derivative kernel of 3x3x3"""
kernel: torch.Tensor = torch.tensor([[[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0],
[-0.5, 0.0, 0.5],
[0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
],
[[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
[[0.0, -0.5, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.5, 0.0]],
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
],
[[[0.0, 0.0, 0.0],
[0.0, -0.5, 0.0],
[0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0],
[0.0, 0.5, 0.0],
[0.0, 0.0, 0.0]],
],
], device=device, dtype=dtype)
return kernel.unsqueeze(1)
def get_diff_kernel3d_2nd_order(device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:
"""Utility function that returns a first order derivative kernel of 3x3x3"""
kernel: torch.Tensor = torch.tensor([[[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0],
[1.0, -2.0, 1.0],
[0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
],
[[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
[[0.0, 1.0, 0.0],
[0.0, -2.0, 0.0],
[0.0, 1.0, 0.0]],
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
],
[[[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0],
[0.0, -2.0, 0.0],
[0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0]],
],
[[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
[[1.0, 0.0, -1.0],
[0.0, 0.0, 0.0],
[-1.0, 0.0, 1.0]],
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
],
[[[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, -1.0, 0.0]],
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
[[0.0, -1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0]],
],
[[[0.0, 0.0, 0.0],
[1.0, 0.0, -1.0],
[0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0],
[-1.0, 0.0, 1.0],
[0.0, 0.0, 0.0]],
],
], device=device, dtype=dtype)
return kernel.unsqueeze(1)
def get_sobel_kernel2d() -> torch.Tensor:
kernel_x: torch.Tensor = get_sobel_kernel_3x3()
kernel_y: torch.Tensor = kernel_x.transpose(0, 1)
return torch.stack([kernel_x, kernel_y])
def get_diff_kernel2d() -> torch.Tensor:
kernel_x: torch.Tensor = get_diff_kernel_3x3()
kernel_y: torch.Tensor = kernel_x.transpose(0, 1)
return torch.stack([kernel_x, kernel_y])
def get_sobel_kernel2d_2nd_order() -> torch.Tensor:
gxx: torch.Tensor = get_sobel_kernel_5x5_2nd_order()
gyy: torch.Tensor = gxx.transpose(0, 1)
gxy: torch.Tensor = _get_sobel_kernel_5x5_2nd_order_xy()
return torch.stack([gxx, gxy, gyy])
def get_diff_kernel2d_2nd_order() -> torch.Tensor:
gxx: torch.Tensor = torch.tensor([
[0., 0., 0.],
[1., -2., 1.],
[0., 0., 0.],
])
gyy: torch.Tensor = gxx.transpose(0, 1)
gxy: torch.Tensor = torch.tensor([
[-1., 0., 1.],
[0., 0., 0.],
[1., 0., -1.],
])
return torch.stack([gxx, gxy, gyy])
def get_spatial_gradient_kernel2d(mode: str, order: int) -> torch.Tensor:
r"""Function that returns kernel for 1st or 2nd order image gradients,
using one of the following operators: sobel, diff"""
    if mode not in ['sobel', 'diff']:
        raise TypeError("mode should be either sobel or diff. Got {}".format(mode))
    if order not in [1, 2]:
        raise TypeError("order should be either 1 or 2. Got {}".format(order))
if mode == 'sobel' and order == 1:
kernel: torch.Tensor = get_sobel_kernel2d()
elif mode == 'sobel' and order == 2:
kernel = get_sobel_kernel2d_2nd_order()
elif mode == 'diff' and order == 1:
kernel = get_diff_kernel2d()
elif mode == 'diff' and order == 2:
kernel = get_diff_kernel2d_2nd_order()
else:
raise NotImplementedError("")
return kernel
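# Illustrative sketch (not part of the original module): the stacks hold one
# filter per derivative -- (dx, dy) for order 1 and (dxx, dxy, dyy) for order 2.
def _spatial_gradient_kernel_example() -> None:
    assert get_spatial_gradient_kernel2d('sobel', 1).shape == (2, 3, 3)
    assert get_spatial_gradient_kernel2d('sobel', 2).shape == (3, 5, 5)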
def get_spatial_gradient_kernel3d(mode: str, order: int, device=torch.device('cpu'), dtype=torch.float) -> torch.Tensor:
r"""Function that returns kernel for 1st or 2nd order scale pyramid gradients,
using one of the following operators: sobel, diff"""
    if mode not in ['sobel', 'diff']:
        raise TypeError("mode should be either sobel or diff. Got {}".format(mode))
    if order not in [1, 2]:
        raise TypeError("order should be either 1 or 2. Got {}".format(order))
if mode == 'sobel':
raise NotImplementedError("Sobel kernel for 3d gradient is not implemented yet")
elif mode == 'diff' and order == 1:
kernel = get_diff_kernel3d(device, dtype)
elif mode == 'diff' and order == 2:
kernel = get_diff_kernel3d_2nd_order(device, dtype)
else:
raise NotImplementedError("")
return kernel
def get_gaussian_kernel1d(kernel_size: int,
sigma: float,
force_even: bool = False) -> torch.Tensor:
r"""Function that returns Gaussian filter coefficients.
Args:
kernel_size (int): filter size. It should be odd and positive.
sigma (float): gaussian standard deviation.
force_even (bool): overrides requirement for odd kernel size.
Returns:
Tensor: 1D tensor with gaussian filter coefficients.
Shape:
- Output: :math:`(\text{kernel_size})`
Examples::
>>> kornia.image.get_gaussian_kernel(3, 2.5)
tensor([0.3243, 0.3513, 0.3243])
>>> kornia.image.get_gaussian_kernel(5, 1.5)
tensor([0.1201, 0.2339, 0.2921, 0.2339, 0.1201])
"""
if (not isinstance(kernel_size, int) or (
(kernel_size % 2 == 0) and not force_even) or (
kernel_size <= 0)):
raise TypeError(
"kernel_size must be an odd positive integer. "
"Got {}".format(kernel_size)
)
window_1d: torch.Tensor = gaussian(kernel_size, sigma)
return window_1d
def get_gaussian_kernel2d(
kernel_size: Tuple[int, int],
sigma: Tuple[float, float],
force_even: bool = False) -> torch.Tensor:
r"""Function that returns Gaussian filter matrix coefficients.
Args:
kernel_size (Tuple[int, int]): filter sizes in the x and y direction.
Sizes should be odd and positive.
sigma (Tuple[int, int]): gaussian standard deviation in the x and y
direction.
force_even (bool): overrides requirement for odd kernel size.
Returns:
Tensor: 2D tensor with gaussian filter matrix coefficients.
Shape:
- Output: :math:`(\text{kernel_size}_x, \text{kernel_size}_y)`
Examples::
>>> kornia.image.get_gaussian_kernel2d((3, 3), (1.5, 1.5))
tensor([[0.0947, 0.1183, 0.0947],
[0.1183, 0.1478, 0.1183],
[0.0947, 0.1183, 0.0947]])
>>> kornia.image.get_gaussian_kernel2d((3, 5), (1.5, 1.5))
tensor([[0.0370, 0.0720, 0.0899, 0.0720, 0.0370],
[0.0462, 0.0899, 0.1123, 0.0899, 0.0462],
[0.0370, 0.0720, 0.0899, 0.0720, 0.0370]])
"""
if not isinstance(kernel_size, tuple) or len(kernel_size) != 2:
raise TypeError(
"kernel_size must be a tuple of length two. Got {}".format(
kernel_size
)
)
if not isinstance(sigma, tuple) or len(sigma) != 2:
raise TypeError(
"sigma must be a tuple of length two. Got {}".format(sigma)
)
ksize_x, ksize_y = kernel_size
sigma_x, sigma_y = sigma
kernel_x: torch.Tensor = get_gaussian_kernel1d(ksize_x, sigma_x, force_even)
kernel_y: torch.Tensor = get_gaussian_kernel1d(ksize_y, sigma_y, force_even)
kernel_2d: torch.Tensor = torch.matmul(
kernel_x.unsqueeze(-1), kernel_y.unsqueeze(-1).t()
)
return kernel_2d
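# Illustrative sketch (not part of the original module): the 2D Gaussian
# kernel is separable -- the outer product of the two 1D windows built above.
def _gaussian_kernel2d_example() -> None:
    kx = get_gaussian_kernel1d(3, 1.5)
    k2d = get_gaussian_kernel2d((3, 3), (1.5, 1.5))
    assert torch.allclose(k2d, torch.matmul(kx.unsqueeze(-1), kx.unsqueeze(-1).t()))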
def get_laplacian_kernel1d(kernel_size: int) -> torch.Tensor:
r"""Function that returns the coefficients of a 1D Laplacian filter.
Args:
kernel_size (int): filter size. It should be odd and positive.
Returns:
Tensor (float): 1D tensor with laplacian filter coefficients.
Shape:
- Output: math:`(\text{kernel_size})`
Examples::
>>> kornia.image.get_laplacian_kernel(3)
tensor([ 1., -2., 1.])
>>> kornia.image.get_laplacian_kernel(5)
tensor([ 1., 1., -4., 1., 1.])
"""
if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or \
kernel_size <= 0:
raise TypeError("ksize must be an odd positive integer. Got {}"
.format(kernel_size))
window_1d: torch.Tensor = laplacian_1d(kernel_size)
return window_1d
def get_laplacian_kernel2d(kernel_size: int) -> torch.Tensor:
r"""Function that returns Gaussian filter matrix coefficients.
Args:
kernel_size (int): filter size should be odd.
Returns:
Tensor: 2D tensor with laplacian filter matrix coefficients.
Shape:
- Output: :math:`(\text{kernel_size}_x, \text{kernel_size}_y)`
Examples::
>>> kornia.image.get_laplacian_kernel2d(3)
tensor([[ 1., 1., 1.],
[ 1., -8., 1.],
[ 1., 1., 1.]])
>>> kornia.image.get_laplacian_kernel2d(5)
tensor([[ 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1.],
[ 1., 1., -24., 1., 1.],
[ 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1.]])
"""
if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or \
kernel_size <= 0:
raise TypeError("ksize must be an odd positive integer. Got {}"
.format(kernel_size))
kernel = torch.ones((kernel_size, kernel_size))
mid = kernel_size // 2
kernel[mid, mid] = 1 - kernel_size ** 2
kernel_2d: torch.Tensor = kernel
return kernel_2d
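# Illustrative sketch (not part of the original module): Laplacian
# coefficients sum to zero, so flat image regions map to a zero response.
def _laplacian_kernel2d_example() -> None:
    assert torch.isclose(get_laplacian_kernel2d(5).sum(), torch.tensor(0.0))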
def get_motion_kernel2d(kernel_size: int, angle: Union[torch.Tensor, float],
direction: Union[torch.Tensor, float] = 0.) -> torch.Tensor:
r"""Function that returns motion blur filter.
Args:
kernel_size (int): motion kernel width and height. It should be odd and positive.
angle (torch.Tensor, float): angle of the motion blur in degrees (anti-clockwise rotation).
direction (float): forward/backward direction of the motion blur.
Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),
while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a
uniformly (but still angled) motion blur.
Returns:
torch.Tensor: the motion blur kernel.
Shape:
- Output: :math:`(ksize, ksize)`
Examples::
>>> kornia.filters.get_motion_kernel2d(5, 0., 0.)
tensor([[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.2000, 0.2000, 0.2000, 0.2000, 0.2000],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000]])
>>> kornia.filters.get_motion_kernel2d(3, 215., -0.5)
tensor([[0.0000, 0.0412, 0.0732],
[0.1920, 0.3194, 0.0804],
[0.2195, 0.0743, 0.0000]])
"""
if not isinstance(kernel_size, int) or kernel_size % 2 == 0 or kernel_size < 3:
raise TypeError("ksize must be an odd integer >= than 3")
if not isinstance(angle, torch.Tensor):
angle = torch.tensor([angle])
angle = cast(torch.Tensor, angle)
if angle.dim() == 0:
angle = angle.unsqueeze(dim=0)
assert angle.dim() == 1, f"angle must be a 1-dim tensor. Got {angle}."
if not isinstance(direction, torch.Tensor):
direction = torch.tensor([direction])
direction = cast(torch.Tensor, direction)
if direction.dim() == 0:
direction = direction.unsqueeze(dim=0)
assert direction.dim() == 1, f"direction must be a 1-dim tensor. Got {direction}."
kernel_tuple: Tuple[int, int] = (kernel_size, kernel_size)
# direction from [-1, 1] to [0, 1] range
direction = (torch.clamp(direction, -1., 1.).item() + 1.) / 2.
kernel = torch.zeros(kernel_tuple, dtype=torch.float)
kernel[kernel_tuple[0] // 2, :] = torch.linspace(direction, 1. - direction, steps=kernel_tuple[0])
kernel = kernel.unsqueeze(0).unsqueeze(0)
# rotate (counterclockwise) kernel by given angle
kernel = rotate(kernel, angle)
kernel = kernel[0][0]
kernel = kernel / kernel.sum()
return kernel
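# Illustrative sketch (not part of the original module): angle=0 and
# direction=0 give the uniform horizontal kernel from the docstring, and the
# weights always sum to 1 so overall brightness is preserved.
def _motion_kernel_example() -> None:
    k = get_motion_kernel2d(5, 0., 0.)
    assert k.shape == (5, 5)
    assert torch.isclose(k.sum(), torch.tensor(1.0))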
| [
"torch.zeros",
"torch.device",
"torch.stack",
"torch.arange",
"torch.linspace",
"torch.clamp",
"torch.ones",
"torch.tensor"
] | 1.6.0 | pmeier/kornia | 57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf |
1.2 | from __future__ import absolute_import
import torch.nn as nn
import math
__all__ = ["resnet"]
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
        # conv2 keeps stride=1: only conv1 (and the downsample branch) change
        # the resolution, otherwise `out += residual` fails for stride > 1.
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False
        )
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
"""ResNet for CIFAR10/100 dataset."""
def __init__(self, depth, num_classes=1000, block_name="BasicBlock"):
super(ResNet, self).__init__()
# Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == "basicblock":
            assert (
                depth - 2
            ) % 6 == 0, "When using BasicBlock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202"
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == "bottleneck":
            assert (
                depth - 2
            ) % 9 == 0, "When using Bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199"
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError("block_name should be BasicBlock or Bottleneck")
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, n)
self.layer2 = self._make_layer(block, 32, n, stride=2)
self.layer3 = self._make_layer(block, 64, n, stride=2)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x) # 32x32
x = self.layer1(x) # 32x32
x = self.layer2(x) # 16x16
x = self.layer3(x) # 8x8
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet(**kwargs):
"""
Constructs a ResNet model.
"""
return ResNet(**kwargs)
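# Illustrative usage sketch (not part of the original module): a CIFAR-style
# ResNet-20 classifier applied to a 32x32 RGB batch.
def _resnet_example() -> None:
    import torch
    model = resnet(depth=20, num_classes=10, block_name="BasicBlock")
    logits = model(torch.randn(2, 3, 32, 32))
    assert logits.shape == (2, 10)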
| [
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.2.0 | awwong1/ml-research | 6f0bb585fef0c4567a5f02937fea62726b9c88dd |
1.0 | """
Library for extracting interesting quantities from autograd, see README.md
Not thread-safe because of module-level variables
Notation:
o: number of output classes (exact Hessian), number of Hessian samples (sampled Hessian)
n: batch-size
do: output dimension (output channels for convolution)
di: input dimension (input channels for convolution)
Hi: per-example Hessian of matmul, shaped as matrix of [dim, dim], indices have been row-vectorized
Hi_bias: per-example Hessian of bias
Oh, Ow: output height, output width (convolution)
Kh, Kw: kernel height, kernel width (convolution)
Jb: batch output Jacobian of matmul, output sensitivity for example,class pair, [o, n, ....]
Jb_bias: as above, but for bias
A, activations: inputs into current layer
B, backprops: backprop values (aka Lop aka Jacobian-vector product) observed at current layer
"""
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
_supported_layers = ['Linear', 'Conv2d'] # Supported layer class types
_hooks_disabled: bool = False # work-around for https://github.com/pytorch/pytorch/issues/25723
_enforce_fresh_backprop: bool = False # global switch to catch double backprop errors on Hessian computation
def add_hooks(model: nn.Module) -> None:
"""
Adds hooks to model to save activations and backprop values.
The hooks will
1. save activations into param.activations during forward pass
2. append backprops to params.backprops_list during backward pass.
Call "remove_hooks(model)" to disable this.
Args:
model:
"""
global _hooks_disabled
_hooks_disabled = False
handles = []
for layer in model.modules():
if _layer_type(layer) in _supported_layers:
handles.append(layer.register_forward_hook(_capture_activations))
handles.append(layer.register_backward_hook(_capture_backprops))
model.__dict__.setdefault('autograd_hacks_hooks', []).extend(handles)
def remove_hooks(model: nn.Module) -> None:
"""
Remove hooks added by add_hooks(model)
"""
assert model == 0, "not working, remove this after fix to https://github.com/pytorch/pytorch/issues/25723"
if not hasattr(model, 'autograd_hacks_hooks'):
print("Warning, asked to remove hooks, but no hooks found")
else:
for handle in model.autograd_hacks_hooks:
handle.remove()
del model.autograd_hacks_hooks
def disable_hooks() -> None:
"""
Globally disable all hooks installed by this library.
"""
global _hooks_disabled
_hooks_disabled = True
def enable_hooks() -> None:
"""the opposite of disable_hooks()"""
global _hooks_disabled
_hooks_disabled = False
def is_supported(layer: nn.Module) -> bool:
"""Check if this layer is supported"""
return _layer_type(layer) in _supported_layers
def _layer_type(layer: nn.Module) -> str:
return layer.__class__.__name__
def _capture_activations(layer: nn.Module, input: List[torch.Tensor], output: torch.Tensor):
"""Save activations into layer.activations in forward pass"""
if _hooks_disabled:
return
assert _layer_type(layer) in _supported_layers, "Hook installed on unsupported layer, this shouldn't happen"
setattr(layer, "activations", input[0].detach())
def _capture_backprops(layer: nn.Module, _input, output):
"""Append backprop to layer.backprops_list in backward pass."""
global _enforce_fresh_backprop
if _hooks_disabled:
return
if _enforce_fresh_backprop:
assert not hasattr(layer, 'backprops_list'), "Seeing result of previous backprop, use clear_backprops(model) to clear"
_enforce_fresh_backprop = False
if not hasattr(layer, 'backprops_list'):
setattr(layer, 'backprops_list', [])
layer.backprops_list.append(output[0].detach())
def clear_backprops(model: nn.Module) -> None:
"""Delete layer.backprops_list in every layer."""
for layer in model.modules():
if hasattr(layer, 'backprops_list'):
del layer.backprops_list
def compute_grad1(model: nn.Module, loss_type: str = 'mean') -> None:
"""
    Compute per-example gradients and save them under 'param.grad1'. Must be called after loss.backward()
    Args:
        model:
        loss_type: either "mean" or "sum", depending on whether the backpropped loss was averaged or summed over the batch
"""
assert loss_type in ('sum', 'mean')
for layer in model.modules():
layer_type = _layer_type(layer)
if layer_type not in _supported_layers:
continue
assert hasattr(layer, 'activations'), "No activations detected, run forward after add_hooks(model)"
assert hasattr(layer, 'backprops_list'), "No backprops detected, run backward after add_hooks(model)"
assert len(layer.backprops_list) == 1, "Multiple backprops detected, make sure to call clear_backprops(model)"
A = layer.activations
n = A.shape[0]
if loss_type == 'mean':
B = layer.backprops_list[0] * n
else: # loss_type == 'sum':
B = layer.backprops_list[0]
if layer_type == 'Linear':
setattr(layer.weight, 'grad1', torch.einsum('ni,nj->nij', B, A))
if layer.bias is not None:
setattr(layer.bias, 'grad1', B)
elif layer_type == 'Conv2d':
A = torch.nn.functional.unfold(A, layer.kernel_size, dilation=layer.dilation, padding=layer.padding, stride=layer.stride)
B = B.reshape(n, -1, A.shape[-1])
grad1 = torch.einsum('ijk,ilk->ijl', B, A)
shape = [n] + list(layer.weight.shape)
setattr(layer.weight, 'grad1', grad1.reshape(shape))
if layer.bias is not None:
setattr(layer.bias, 'grad1', torch.sum(B, dim=2))
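# Illustrative sketch (not part of the original library, assumed workflow
# mirroring the docstrings above): per-example gradients for a tiny model.
def _compute_grad1_example() -> None:
    model = nn.Sequential(nn.Linear(4, 3))
    add_hooks(model)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    compute_grad1(model, loss_type='mean')
    # One gradient per example: [batch, out_features, in_features]
    assert model[0].weight.grad1.shape == (8, 3, 4)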
def compute_hess(model: nn.Module,) -> None:
"""Save Hessian under param.hess for each param in the model"""
for layer in model.modules():
layer_type = _layer_type(layer)
if layer_type not in _supported_layers:
continue
assert hasattr(layer, 'activations'), "No activations detected, run forward after add_hooks(model)"
assert hasattr(layer, 'backprops_list'), "No backprops detected, run backward after add_hooks(model)"
if layer_type == 'Linear':
A = layer.activations
B = torch.stack(layer.backprops_list)
n = A.shape[0]
o = B.shape[0]
A = torch.stack([A] * o)
Jb = torch.einsum("oni,onj->onij", B, A).reshape(n*o, -1)
H = torch.einsum('ni,nj->ij', Jb, Jb) / n
setattr(layer.weight, 'hess', H)
if layer.bias is not None:
setattr(layer.bias, 'hess', torch.einsum('oni,onj->ij', B, B)/n)
elif layer_type == 'Conv2d':
Kh, Kw = layer.kernel_size
di, do = layer.in_channels, layer.out_channels
A = layer.activations.detach()
A = torch.nn.functional.unfold(A, (Kh, Kw)) # n, di * Kh * Kw, Oh * Ow
n = A.shape[0]
B = torch.stack([Bt.reshape(n, do, -1) for Bt in layer.backprops_list]) # o, n, do, Oh*Ow
o = B.shape[0]
A = torch.stack([A] * o) # o, n, di * Kh * Kw, Oh*Ow
Jb = torch.einsum('onij,onkj->onik', B, A) # o, n, do, di * Kh * Kw
Hi = torch.einsum('onij,onkl->nijkl', Jb, Jb) # n, do, di*Kh*Kw, do, di*Kh*Kw
Jb_bias = torch.einsum('onij->oni', B)
Hi_bias = torch.einsum('oni,onj->nij', Jb_bias, Jb_bias)
setattr(layer.weight, 'hess', Hi.mean(dim=0))
if layer.bias is not None:
setattr(layer.bias, 'hess', Hi_bias.mean(dim=0))
def backprop_hess(output: torch.Tensor, hess_type: str) -> None:
"""
Call backprop 1 or more times to get values needed for Hessian computation.
Args:
output: prediction of neural network (ie, input of nn.CrossEntropyLoss())
hess_type: type of Hessian propagation, "CrossEntropy" results in exact Hessian for CrossEntropy
Returns:
"""
assert hess_type in ('LeastSquares', 'CrossEntropy')
global _enforce_fresh_backprop
n, o = output.shape
_enforce_fresh_backprop = True
if hess_type == 'CrossEntropy':
batch = F.softmax(output, dim=1)
mask = torch.eye(o).expand(n, o, o)
diag_part = batch.unsqueeze(2).expand(n, o, o) * mask
outer_prod_part = torch.einsum('ij,ik->ijk', batch, batch)
hess = diag_part - outer_prod_part
assert hess.shape == (n, o, o)
for i in range(n):
hess[i, :, :] = symsqrt(hess[i, :, :])
hess = hess.transpose(0, 1)
elif hess_type == 'LeastSquares':
hess = []
assert len(output.shape) == 2
batch_size, output_size = output.shape
id_mat = torch.eye(output_size)
for out_idx in range(output_size):
hess.append(torch.stack([id_mat[out_idx]] * batch_size))
    # One backward pass per output class / Hessian sample; avoid shadowing `o`.
    for i in range(o):
        output.backward(hess[i], retain_graph=True)
def symsqrt(a, cond=None, return_rank=False, dtype=torch.float32):
"""Symmetric square root of a positive semi-definite matrix.
See https://github.com/pytorch/pytorch/issues/25481"""
s, u = torch.symeig(a, eigenvectors=True)
cond_dict = {torch.float32: 1e3 * 1.1920929e-07, torch.float64: 1E6 * 2.220446049250313e-16}
if cond in [None, -1]:
cond = cond_dict[dtype]
above_cutoff = (abs(s) > cond * torch.max(abs(s)))
psigma_diag = torch.sqrt(s[above_cutoff])
u = u[:, above_cutoff]
B = u @ torch.diag(psigma_diag) @ u.t()
if return_rank:
return B, len(psigma_diag)
else:
return B
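# Illustrative sketch (not part of the original library): for a PSD matrix
# `a`, symsqrt returns B such that B @ B is approximately a.
def _symsqrt_example() -> None:
    m = torch.randn(4, 4)
    a = m @ m.t()  # positive semi-definite by construction
    b = symsqrt(a)
    assert torch.allclose(b @ b, a, atol=1e-4)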
| [
"torch.symeig",
"torch.sqrt",
"torch.stack",
"torch.nn.functional.unfold",
"torch.einsum",
"torch.eye",
"torch.nn.functional.softmax",
"torch.diag",
"torch.sum"
] | 1.0 | shyhuai/kfac_pytorch | f5a99366fa94345697432a8aabdc5d370f68d06f |
0.4 | #!/usr/bin/env python3
"""Script to test a pytorch model on Cifar100's validation set."""
import argparse
import logging
import pprint
import sys
import time
import torch
from torch import nn
from models import model_factory
import opts
import utils
import mul_cifar100
def parse_args(argv):
"""Parse arguments @argv and return the flags needed for training."""
parser = argparse.ArgumentParser(description=__doc__, allow_abbrev=False)
group = parser.add_argument_group('General Options')
opts.add_general_flags(group)
group = parser.add_argument_group('Dataset Options')
opts.add_dataset_flags(group)
group = parser.add_argument_group('Model Options')
opts.add_model_flags(group)
args = parser.parse_args(argv)
if args.model_state_file is None:
parser.error("You should set --model-state-file to reload a model "
"state.")
return args
def test_for_one_epoch(model, loss, test_loader, epoch_number):
model.eval()
loss.eval()
data_time_meter = utils.AverageMeter()
batch_time_meter = utils.AverageMeter()
loss_meter = utils.AverageMeter(recent=100)
top1_meter = utils.AverageMeter(recent=100)
top5_meter = utils.AverageMeter(recent=100)
timestamp = time.time()
for i, (images, labels) in enumerate(test_loader):
batch_size = images.size(0)
if utils.is_model_cuda(model):
images = images.cuda()
labels = labels.cuda()
# Record data time
data_time_meter.update(time.time() - timestamp)
# Forward pass without computing gradients.
with torch.no_grad():
outputs = model(images)
loss_output = loss(outputs, labels)
# Sometimes loss function returns a modified version of the output,
# which must be used to compute the model accuracy.
if isinstance(loss_output, tuple):
loss_value, outputs = loss_output
else:
loss_value = loss_output
# Record loss and model accuracy.
loss_meter.update(loss_value.item(), batch_size)
top1, top5 = utils.topk_accuracy(outputs, labels, recalls=(1, 5))
top1_meter.update(top1, batch_size)
top5_meter.update(top5, batch_size)
# Record batch time
batch_time_meter.update(time.time() - timestamp)
timestamp = time.time()
if i % 10 == 0:
logging.info(
'Epoch: [{epoch}][{batch}/{epoch_size}]\t'
'Time {batch_time.value:.2f} ({batch_time.average:.2f}) '
'Data {data_time.value:.2f} ({data_time.average:.2f}) '
'Loss {loss.value:.3f} {{{loss.average:.3f}, {loss.average_recent:.3f}}} '
'Top-1 {top1.value:.2f} {{{top1.average:.2f}, {top1.average_recent:.2f}}} '
'Top-5 {top5.value:.2f} {{{top5.average:.2f}, {top5.average_recent:.2f}}} '.format(
epoch=epoch_number, batch=i + 1, epoch_size=len(test_loader),
batch_time=batch_time_meter, data_time=data_time_meter,
loss=loss_meter, top1=top1_meter, top5=top5_meter))
# Log the overall test stats
logging.info(
'Epoch: [{epoch}] -- TESTING SUMMARY\t'
'Time {batch_time.sum:.2f} '
'Data {data_time.sum:.2f} '
'Loss {loss.average:.3f} '
'Top-1 {top1.average:.2f} '
'Top-5 {top5.average:.2f} '.format(
epoch=epoch_number, batch_time=batch_time_meter, data_time=data_time_meter,
loss=loss_meter, top1=top1_meter, top5=top5_meter))
def main(argv):
"""Run the test script with command line arguments @argv."""
args = parse_args(argv)
utils.general_setup(args.save, args.gpus)
logging.info("Arguments parsed.\n{}".format(pprint.pformat(vars(args))))
# Create the validation data loaders.
# val_loader = imagenet.get_val_loader(args.imagenet, args.batch_size,
# args.num_workers)
val_loader = mul_cifar100.mul_CIFAR100DataLoader(root=args.data_dir,
image_size=32, train=False, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
# Create model and the loss.
model, loss = model_factory.create_model(
args.model, args.model_state_file, args.gpus, coslinear=args.coslinear, scale=args.s)
logging.info("Model:\n{}".format(model))
# for n,p in model.named_parameters():
# print(n)
# Test for one epoch.
test_for_one_epoch(model, loss, val_loader, epoch_number=1)
print('\n')
if __name__ == '__main__':
main(sys.argv[1:])
| [
"torch.no_grad"
] | 0.4.0 | chrisqqq123/FA-Dist-EfficientNet | cb788b0f212d568d9bf04a51516d79fed5383585 |
1.0 | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""
import collections
import gc
import inspect
import math
import os
import re
import shutil
import time
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
init_deepspeed,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .file_utils import (
WEIGHTS_NAME,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_distributed_available,
is_torch_tpu_available,
)
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedTensorGatherer,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
distributed_broadcast_scalars,
distributed_concat,
nested_concat,
nested_detach,
nested_numpify,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
import fairscale
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if version.parse(fairscale.__version__) >= version.parse("0.3"):
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
else:
FullyShardedDDP = None
if is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
            The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
            maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
            The function may take zero arguments, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
        optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
      ``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False`.
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, torch.nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional["PreTrainedTokenizerBase"] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or (args.deepspeed and args.do_train)
or (args.fp16_full_eval and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
if self.place_model_on_device:
model = model.to(args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create output directory if needed
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
# Enforce rules on using datasets with no __len__
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
self._signature_columns = None
if is_datasets_available():
if isinstance(train_dataset, datasets.Dataset):
self._remove_unused_columns(self.train_dataset, description="training")
if isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(self.eval_dataset, description="evaluation")
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
self.scaler = ShardedGradScaler() if self.sharded_ddp is not None else torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
# Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the
# state at each call to self.log.
self._total_flos = None
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# very last
self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
Add a callback to the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will instantiate a member of that class.
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.
If the callback is not found, returns :obj:`None` (and no error is raised).
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will pop the first member of that class found in the list of callbacks.
Returns:
:class:`~transformer.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
Remove a callback from the current list of :class:`~transformer.TrainerCallback`.
Args:
callback (:obj:`type` or :class:`~transformer.TrainerCallback`):
A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.
In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
dataset.set_format(type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"])
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Gather the number of processes and this process index.
if self.args.parallel_mode == ParallelMode.TPU:
num_processes = xm.xrt_world_size()
process_index = xm.get_ordinal()
elif (
self.args.parallel_mode == ParallelMode.DISTRIBUTED
or self.args.parallel_mode == ParallelMode.SAGEMAKER_DISTRIBUTED
):
num_processes = dist.get_world_size()
process_index = dist.get_rank()
else:
num_processes = 1
process_index = 0
# Build the sampler.
if self.args.group_by_length:
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if num_processes <= 1:
return LengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, model_input_name=model_input_name
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=num_processes,
rank=process_index,
model_input_name=model_input_name,
)
else:
if num_processes <= 1:
return RandomSampler(self.train_dataset)
else:
return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_sampler = self._get_train_sampler()
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
if is_torch_tpu_available():
return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
self._remove_unused_columns(eval_dataset, description="evaluation")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
self._remove_unused_columns(test_dataset, description="test")
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
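            # Note (illustrative, not in the original code): with this grouping,
            # weight decay applies to weight matrices but is zeroed for biases
            # and LayerNorm weights, following the standard BERT fine-tuning recipe.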
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if self.lr_scheduler is None:
warmup_steps = (
self.args.warmup_steps
if self.args.warmup_steps > 0
else math.ceil(num_training_steps * self.args.warmup_ratio)
)
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
        Will raise an exception if the underlying dataset does not implement the method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
""" HP search setup code """
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def _wrap_model(self, model, training=True):
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
self.model = model = FullyShardedDDP(
model, mixed_precision=mixed_precision, reshard_after_forward=zero_3, cpu_offload=cpu_offload
).to(self.args.device)
elif is_sagemaker_distributed_available():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
self.is_in_train = True
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(self.args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({self.args.output_dir})")
if resume_from_checkpoint is not None and os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
logger.info(f"Loading model from {resume_from_checkpoint}).")
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(resume_from_checkpoint)
model_reloaded = True
else:
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
self.model_wrapped = self.model
        # Keep track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if self.args.max_steps > 0:
max_steps = self.args.max_steps
num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(
self.args.max_steps % num_update_steps_per_epoch > 0
)
else:
max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(self.args.num_train_epochs)
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = self.args.max_steps
num_train_epochs = 1
num_update_steps_per_epoch = max_steps
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
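        # With sharded DDP variants other than SIMPLE, the model must be wrapped before
        # the optimizer is created so the optimizer sees the sharded parameters.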
if self.args.deepspeed:
model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)
self.model = model.module
self.model_wrapped = model # will get further wrapped in DDP
self.deepspeed = model # DeepSpeedEngine object
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
else:
world_size = 1
total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps * world_size
num_examples = (
self.num_examples(train_dataloader)
if train_dataset_is_sized
else total_train_batch_size * self.args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not self.args.ignore_data_skip:
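                # global_step counts optimizer updates, so multiply by the accumulation
                # factor to recover the number of raw dataloader batches to skip.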
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not self.args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch."
)
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(self.args.device)
        # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
self._total_flos = self.state.total_flos
model.zero_grad()
self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not self.args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
self.args.device
)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if train_dataset_is_sized
else self.args.max_steps * self.args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
if (step + 1) % self.args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
if (
((step + 1) % self.args.gradient_accumulation_steps != 0)
and self.args.local_rank != -1
and self.args._no_sync_in_gradient_accumulation
):
                    # Avoid an unnecessary DDP gradient synchronization (all-reduce) since no optimizer step is taken on this batch.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self._total_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= self.args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(self.args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(self.args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
self.args.max_grad_norm,
)
# Optimizer step
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
self.optimizer.step()
if not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.args.tpu_metrics_debug or self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
if isinstance(self.model, PreTrainedModel):
self.model = self.model.from_pretrained(self.state.best_model_checkpoint)
if self.place_model_on_device:
self.model = self.model.to(self.args.device)
else:
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
self.model.load_state_dict(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
metrics = speed_metrics("train", start_time, self.state.max_steps)
if self._total_flos is not None:
self.store_flos()
metrics["total_flos"] = self.state.total_flos
self.log(metrics)
self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
if self.deepspeed:
# free up any memory that might be useful for eval
self.deepspeed = None
self.optimizer = None
self.lr_scheduler = None
self.model_wrapped = self.model
gc.collect() # force memory release
# to restore normal behavior outside of train replay the place_model_on_device logic w/o deepspeed
self.place_model_on_device = self.args.place_model_on_device
if self.is_model_parallel:
self.place_model_on_device = False
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
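            # The in-place subtraction zeroes the caller's tensor as well, keeping the
            # running loss in the training loop consistent with what was just logged.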
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
self.store_flos()
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir)
if self.deepspeed:
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=self.args.device)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.deepspeed:
            # Not sure how to check whether a saved deepspeed checkpoint exists, but since load_checkpoint just returns None when it fails to find one, this acts as a check-and-load function
self.deepspeed.load_checkpoint(checkpoint, load_optimizer_states=True, load_lr_scheduler_states=True)
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
The number of trial runs to test.
direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize for greater or lower objective values. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should
pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or
several metrics.
backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
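    # A minimal usage sketch (hypothetical names, assuming a Trainer built with a `model_init`):
    #   def hp_space(trial):
    #       return {"learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True)}
    #   best_run = trainer.hyperparameter_search(hp_space=hp_space, n_trials=10, direction="minimize")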
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
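    # A minimal override sketch (hypothetical subclass) for a custom loss, as suggested
    # by the docstring above:
    #   class MyTrainer(Trainer):
    #       def compute_loss(self, model, inputs, return_outputs=False):
    #           labels = inputs.pop("labels")
    #           outputs = model(**inputs)
    #           logits = outputs.logits
    #           loss = torch.nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1))
    #           return (loss, outputs) if return_outputs else loss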
def is_local_process_zero(self) -> bool:
"""
        Whether or not this process is the local main process (e.g., the main process on one machine when training
        in a distributed fashion on several machines).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return self.args.local_rank in [-1, 0]
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
"""
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return self.args.local_rank == -1 or dist.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
"""
if is_torch_tpu_available():
self._save_tpu(output_dir)
else:
if self.is_world_process_zero():
self._save(output_dir)
if self.args.local_rank != -1:
dist.barrier()
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model checkpoint to %s", output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
save_config=self.is_world_process_zero(),
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(output_dir, state_dict=self.model.state_dict())
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self._total_flos is not None:
if self.args.local_rank != -1:
self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()
else:
self.state.total_flos = self._total_flos
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
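            # Swap the best checkpoint to the end of the list so that it is the last
            # candidate _rotate_checkpoints would delete.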
checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (
checkpoints_sorted[-1],
checkpoints_sorted[best_model_index],
)
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default).
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):
raise ValueError("eval_dataset must implement __len__")
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
output = self.prediction_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(output.metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
            ignore_keys (:obj:`List[str]`, `optional`):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
                An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default).
.. note::
            If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
            padding in a token classification task), the predictions will be padded (on the right) to allow for
            concatenation into one array. The padding index is -100.
        Returns: A `NamedTuple` with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):
raise ValueError("test_dataset must implement __len__")
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
output = self.prediction_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
if self.args.deepspeed and not self.args.do_train:
            # No harm done, but flag to the user that the deepspeed config is ignored for eval.
            # Only flag when --do_train wasn't passed, as only then is the config redundant.
logger.info("Detected the deepspeed argument but it will not be used for evaluation")
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, half it first and then put on device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", batch_size)
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = 1
if is_torch_tpu_available():
world_size = xm.xrt_world_size()
elif self.args.local_rank != -1:
world_size = dist.get_world_size()
world_size = max(1, world_size)
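        # Pad the loss shards to a multiple of batch_size so the per-example losses
        # (repeated below) stay aligned across processes; finalize() truncates any
        # padding back down to num_examples.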
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
preds_gatherer = DistributedTensorGatherer(world_size, num_examples)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
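                # Expand the scalar batch loss to one value per example so it can be
                # gathered and averaged consistently with the predictions.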
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
        Gather the value of `tensors` (a tensor or a list/tuple of nested tensors) across processes and convert
        them to numpy arrays.
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
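                    # Without labels, the outputs contain no loss entry, so the past
                    # state sits one position earlier than in compute_loss.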
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
| [
"torch.distributed.get_world_size",
"torch.cat",
"torch.utils.data.dataloader.DataLoader",
"torch.utils.data.sampler.RandomSampler",
"torch.cuda.amp.autocast",
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.sampler.SequentialSampler",
"torch.tensor",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.amp.GradScaler",
"torch.distributed.get_rank",
"torch.distributed.barrier",
"torch.distributed.get_local_rank",
"torch.nn.DataParallel"
] | 1.0 | silvershine157/transformers | fd01104435914dd65c34026dcec8be008c40ee60 |
1.0 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You ([email protected])
# Class Definition for GAN.
import time
import torch
from datasets.gan.data_loader import DataLoader
from methods.tools.runner_helper import RunnerHelper
from methods.tools.trainer import Trainer
from models.gan.model_manager import ModelManager
from utils.tools.average_meter import AverageMeter
from utils.tools.logger import Logger as Log
class ImageTranslator(object):
"""
    The class for GAN-based image translation. Includes train, val & predict.
"""
def __init__(self, configer):
self.configer = configer
self.batch_time = AverageMeter()
self.data_time = AverageMeter()
self.train_losses = AverageMeter()
self.val_losses = AverageMeter()
self.model_manager = ModelManager(configer)
self.seg_data_loader = DataLoader(configer)
self.gan_net = None
self.train_loader = None
self.val_loader = None
self.optimizer = None
self.scheduler = None
self.runner_state = dict()
self._init_model()
def _init_model(self):
self.gan_net = self.model_manager.gan_model()
self.gan_net = RunnerHelper.load_net(self, self.gan_net)
self.optimizer, self.scheduler = Trainer.init(self._get_parameters(), self.configer.get('solver'))
self.train_loader = self.seg_data_loader.get_trainloader()
self.val_loader = self.seg_data_loader.get_valloader()
def _get_parameters(self):
return self.gan_net.parameters()
def train(self):
"""
Train function of every epoch during train phase.
"""
self.gan_net.train()
start_time = time.time()
        # Adjust the learning rate on every iteration according to the solver schedule.
for i, data_dict in enumerate(self.train_loader):
Trainer.update(self, solver_dict=self.configer.get('solver'))
inputs = data_dict['imgA']
self.data_time.update(time.time() - start_time)
# Forward pass.
out_dict = self.gan_net(data_dict)
# outputs = self.module_utilizer.gather(outputs)
loss = out_dict['loss'].mean()
self.train_losses.update(loss.item(), inputs.size(0))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Update the vars of the train phase.
self.batch_time.update(time.time() - start_time)
start_time = time.time()
self.runner_state['iters'] += 1
# Print the log info & reset the states.
if self.runner_state['iters'] % self.configer.get('solver', 'display_iter') == 0:
Log.info('Train Epoch: {0}\tTrain Iteration: {1}\t'
'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:.3f})\n'
'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
self.runner_state['epoch'], self.runner_state['iters'],
self.configer.get('solver', 'display_iter'),
RunnerHelper.get_lr(self.optimizer), batch_time=self.batch_time,
data_time=self.data_time, loss=self.train_losses))
self.batch_time.reset()
self.data_time.reset()
self.train_losses.reset()
if self.configer.get('solver', 'lr')['metric'] == 'iters' \
and self.runner_state['iters'] == self.configer.get('solver', 'max_iters'):
break
# Check to val the current model.
if self.runner_state['iters'] % self.configer.get('solver', 'test_interval') == 0:
self.val()
self.runner_state['epoch'] += 1
def val(self, data_loader=None):
"""
Validation function during the train phase.
"""
self.gan_net.eval()
start_time = time.time()
data_loader = self.val_loader if data_loader is None else data_loader
for j, data_dict in enumerate(data_loader):
inputs = data_dict['imgA']
with torch.no_grad():
# Forward pass.
out_dict = self.gan_net(data_dict)
# Compute the loss of the val batch.
self.val_losses.update(out_dict['loss'].mean().item(), inputs.size(0))
# Update the vars of the val phase.
self.batch_time.update(time.time() - start_time)
start_time = time.time()
RunnerHelper.save_net(self, self.gan_net,
val_loss=self.val_losses.avg)
# Print the log info & reset the states.
Log.info(
'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
'Loss {loss.avg:.8f}\n'.format(
batch_time=self.batch_time, loss=self.val_losses))
self.batch_time.reset()
self.val_losses.reset()
self.gan_net.train()
if __name__ == "__main__":
    # Test class for the image translator.
pass
| [
"torch.no_grad"
] | 1.0.1 | MendelXu/ANN | f4eabeb27dbba5c9bdcf83d03776bffa34995666 |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import Namespace
from unittest import mock
import pytest
import torch
import yaml
from omegaconf import OmegaConf
from packaging.version import Version
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
@RunIf(min_torch="1.5.0")
def test_tensorboard_hparams_reload(tmpdir):
class CustomModel(BoringModel):
def __init__(self, b1=0.5, b2=0.999):
super().__init__()
self.save_hyperparameters()
trainer = Trainer(max_steps=1, default_root_dir=tmpdir)
model = CustomModel()
assert trainer.log_dir == trainer.logger.log_dir
trainer.fit(model)
assert trainer.log_dir == trainer.logger.log_dir
folder_path = trainer.log_dir
# make sure yaml is there
with open(os.path.join(folder_path, "hparams.yaml")) as file:
# The FullLoader parameter handles the conversion from YAML
# scalar values to Python the dictionary format
yaml_params = yaml.safe_load(file)
assert yaml_params["b1"] == 0.5
assert yaml_params["b2"] == 0.999
assert len(yaml_params.keys()) == 2
# verify artifacts
assert len(os.listdir(os.path.join(folder_path, "checkpoints"))) == 1
# verify tb logs
event_acc = EventAccumulator(folder_path)
event_acc.Reload()
data_pt_1_5 = b'\x12\x1b"\x04\n\x02b1"\x04\n\x02b2*\r\n\x0b\x12\thp_metric'
data_pt_1_6 = b'\x12\x1f"\x06\n\x02b1 \x03"\x06\n\x02b2 \x03*\r\n\x0b\x12\thp_metric'
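    # The serialized hparams plugin payload changed between torch 1.5 and 1.6
    # (presumably extra type metadata), so pick the expected bytes by version.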
hparams_data = data_pt_1_6 if Version(torch.__version__) >= Version("1.6.0") else data_pt_1_5
assert event_acc.summary_metadata['_hparams_/experiment'].plugin_data.plugin_name == 'hparams'
assert event_acc.summary_metadata['_hparams_/experiment'].plugin_data.content == hparams_data
def test_tensorboard_automatic_versioning(tmpdir):
"""Verify that automatic versioning works"""
root_dir = tmpdir / "tb_versioning"
root_dir.mkdir()
(root_dir / "version_0").mkdir()
(root_dir / "version_1").mkdir()
logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning")
assert logger.version == 2
def test_tensorboard_manual_versioning(tmpdir):
"""Verify that manual versioning works"""
root_dir = tmpdir / "tb_versioning"
root_dir.mkdir()
(root_dir / "version_0").mkdir()
(root_dir / "version_1").mkdir()
(root_dir / "version_2").mkdir()
logger = TensorBoardLogger(save_dir=tmpdir, name="tb_versioning", version=1)
assert logger.version == 1
def test_tensorboard_named_version(tmpdir):
"""Verify that manual versioning works for string versions, e.g. '2020-02-05-162402' """
name = "tb_versioning"
(tmpdir / name).mkdir()
expected_version = "2020-02-05-162402"
logger = TensorBoardLogger(save_dir=tmpdir, name=name, version=expected_version)
logger.log_hyperparams({"a": 1, "b": 2, 123: 3, 3.5: 4, 5j: 5}) # Force data to be written
assert logger.version == expected_version
assert os.listdir(tmpdir / name) == [expected_version]
assert os.listdir(tmpdir / name / expected_version)
@pytest.mark.parametrize("name", ["", None])
def test_tensorboard_no_name(tmpdir, name):
"""Verify that None or empty name works"""
logger = TensorBoardLogger(save_dir=tmpdir, name=name)
logger.log_hyperparams({"a": 1, "b": 2, 123: 3, 3.5: 4, 5j: 5}) # Force data to be written
assert logger.root_dir == tmpdir
assert os.listdir(tmpdir / "version_0")
def test_tensorboard_log_sub_dir(tmpdir):
class TestLogger(TensorBoardLogger):
# for reproducibility
@property
def version(self):
return "version"
@property
def name(self):
return "name"
trainer_args = dict(
default_root_dir=tmpdir,
max_steps=1,
)
# no sub_dir specified
save_dir = tmpdir / "logs"
logger = TestLogger(save_dir)
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(save_dir, "name", "version")
# sub_dir specified
logger = TestLogger(save_dir, sub_dir="sub_dir")
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(save_dir, "name", "version", "sub_dir")
# test home dir (`~`) handling
save_dir = "~/tmp"
explicit_save_dir = os.path.expanduser(save_dir)
logger = TestLogger(save_dir, sub_dir="sub_dir")
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(explicit_save_dir, "name", "version", "sub_dir")
# test env var (`$`) handling
test_env_dir = "some_directory"
os.environ["test_env_dir"] = test_env_dir
save_dir = "$test_env_dir/tmp"
explicit_save_dir = f"{test_env_dir}/tmp"
logger = TestLogger(save_dir, sub_dir="sub_dir")
trainer = Trainer(**trainer_args, logger=logger)
assert trainer.logger.log_dir == os.path.join(explicit_save_dir, "name", "version", "sub_dir")
@pytest.mark.parametrize("step_idx", [10, None])
def test_tensorboard_log_metrics(tmpdir, step_idx):
logger = TensorBoardLogger(tmpdir)
metrics = {
"float": 0.3,
"int": 1,
"FloatTensor": torch.tensor(0.1),
"IntTensor": torch.tensor(1),
}
logger.log_metrics(metrics, step_idx)
def test_tensorboard_log_hyperparams(tmpdir):
logger = TensorBoardLogger(tmpdir)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {
"a": {
"b": "c"
}
},
"list": [1, 2, 3],
"namespace": Namespace(foo=Namespace(bar="buzz")),
"layer": torch.nn.BatchNorm1d,
}
logger.log_hyperparams(hparams)
def test_tensorboard_log_hparams_and_metrics(tmpdir):
logger = TensorBoardLogger(tmpdir, default_hp_metric=False)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {
"a": {
"b": "c"
}
},
"list": [1, 2, 3],
"namespace": Namespace(foo=Namespace(bar="buzz")),
"layer": torch.nn.BatchNorm1d,
}
metrics = {"abc": torch.tensor([0.54])}
logger.log_hyperparams(hparams, metrics)
def test_tensorboard_log_omegaconf_hparams_and_metrics(tmpdir):
logger = TensorBoardLogger(tmpdir, default_hp_metric=False)
hparams = {
"float": 0.3,
"int": 1,
"string": "abc",
"bool": True,
"dict": {
"a": {
"b": "c"
}
},
"list": [1, 2, 3],
# "namespace": Namespace(foo=Namespace(bar="buzz")),
# "layer": torch.nn.BatchNorm1d,
}
hparams = OmegaConf.create(hparams)
metrics = {"abc": torch.tensor([0.54])}
logger.log_hyperparams(hparams, metrics)
@pytest.mark.parametrize("example_input_array", [None, torch.rand(2, 32)])
def test_tensorboard_log_graph(tmpdir, example_input_array):
""" test that log graph works with both model.example_input_array and
if array is passed externaly
"""
model = BoringModel()
if example_input_array is not None:
model.example_input_array = None
logger = TensorBoardLogger(tmpdir, log_graph=True)
logger.log_graph(model, example_input_array)
def test_tensorboard_log_graph_warning_no_example_input_array(tmpdir):
""" test that log graph throws warning if model.example_input_array is None """
model = BoringModel()
model.example_input_array = None
logger = TensorBoardLogger(tmpdir, log_graph=True)
with pytest.warns(
UserWarning,
match='Could not log computational graph since the `model.example_input_array`'
' attribute is not set or `input_array` was not given'
):
logger.log_graph(model)
@mock.patch('pytorch_lightning.loggers.TensorBoardLogger.log_metrics')
def test_tensorboard_with_accumulated_gradients(mock_log_metrics, tmpdir):
"""Tests that TensorBoard logs properly when gradients are accumulated (accumulate_grad_batches > 1)"""
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.indexes = []
def training_step(self, *args):
self.log('foo', 1, on_step=True, on_epoch=True)
if not self.trainer.train_loop.should_accumulate():
if self.trainer.logger_connector.should_update_logs:
self.indexes.append(self.trainer.global_step)
return super().training_step(*args)
model = TestModel()
model.training_epoch_end = None
logger_0 = TensorBoardLogger(tmpdir, default_hp_metric=False)
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=12,
limit_val_batches=0,
max_epochs=3,
accumulate_grad_batches=2,
logger=[logger_0],
log_every_n_steps=3,
)
trainer.fit(model)
calls = [m[2] for m in mock_log_metrics.mock_calls]
count_epochs = [c["step"] for c in calls if "foo_epoch" in c["metrics"]]
assert count_epochs == [5, 11, 17]
count_steps = [c["step"] for c in calls if "foo_step" in c["metrics"]]
assert count_steps == model.indexes
@mock.patch('pytorch_lightning.loggers.tensorboard.SummaryWriter')
def test_tensorboard_finalize(summary_writer, tmpdir):
""" Test that the SummaryWriter closes in finalize. """
logger = TensorBoardLogger(save_dir=tmpdir)
logger.finalize("any")
summary_writer().flush.assert_called()
summary_writer().close.assert_called()
def test_tensorboard_save_hparams_to_yaml_once(tmpdir):
model = BoringModel()
logger = TensorBoardLogger(save_dir=tmpdir, default_hp_metric=False)
trainer = Trainer(max_steps=1, default_root_dir=tmpdir, logger=logger)
assert trainer.log_dir == trainer.logger.log_dir
trainer.fit(model)
hparams_file = "hparams.yaml"
assert os.path.isfile(os.path.join(trainer.log_dir, hparams_file))
assert not os.path.isfile(os.path.join(tmpdir, hparams_file))
@mock.patch('pytorch_lightning.loggers.tensorboard.log')
def test_tensorboard_with_symlink(log, tmpdir):
"""
Tests a specific failure case when tensorboard logger is used with empty name, symbolic link ``save_dir``, and
relative paths.
"""
os.chdir(tmpdir) # need to use relative paths
source = os.path.join('.', 'lightning_logs')
dest = os.path.join('.', 'sym_lightning_logs')
os.makedirs(source, exist_ok=True)
os.symlink(source, dest)
logger = TensorBoardLogger(save_dir=dest, name='')
_ = logger.version
log.warning.assert_not_called()
| ["torch.rand", "torch.tensor"] | 1.4 | xxxhycl2010/pytorch-lightning | 7e18b118449133a5184b9014082ff1fb9818cf9b |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
GPT2Config,
GPT2DoubleHeadsModel,
GPT2ForSequenceClassification,
GPT2LMHeadModel,
GPT2Model,
GPT2Tokenizer,
)
class GPT2ModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=True,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
def prepare_config_and_inputs(self, gradient_checkpointing=False):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
mc_token_ids = None
if self.use_mc_token_ids:
mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = GPT2Config(
vocab_size=self.vocab_size,
n_embd=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
# intermediate_size=self.intermediate_size,
# hidden_act=self.hidden_act,
# hidden_dropout_prob=self.hidden_dropout_prob,
# attention_probs_dropout_prob=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
n_ctx=self.max_position_embeddings,
# type_vocab_size=self.type_vocab_size,
# initializer_range=self.initializer_range,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
return_dict=True,
gradient_checkpointing=gradient_checkpointing,
)
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_gpt2_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = GPT2Model(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_gpt2_model_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = GPT2Model(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids, token_type_ids=token_type_ids)
outputs_no_past = model(input_ids, token_type_ids=token_type_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
output, past = outputs.to_tuple()
# create a hypothetical next token and extend next_input_ids with it
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
next_token_types = ids_tensor([self.batch_size, 1], self.type_vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)
output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"]
output_from_past = model(next_tokens, token_type_ids=next_token_types, past=past)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_gpt2_model_attention_mask_past(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
model = GPT2Model(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
# create a hypothetical next token and extend next_input_ids with it
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past=past, attention_mask=attn_mask)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_gpt2_model_past_large_inputs(
self, config, input_ids, input_mask, head_mask, token_type_ids, *args
):
model = GPT2Model(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, token_type_ids=token_type_ids, use_cache=True)
output, past = outputs.to_tuple()
# create a hypothetical next token and extend next_input_ids with it
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_token_types = ids_tensor([self.batch_size, 3], self.type_vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_token_type_ids = torch.cat([token_type_ids, next_token_types], dim=-1)
output_from_no_past = model(next_input_ids, token_type_ids=next_token_type_ids)["last_hidden_state"]
output_from_past = model(next_tokens, token_type_ids=next_token_types, past=past)["last_hidden_state"]
self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = GPT2LMHeadModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
model = GPT2LMHeadModel(config)
model.to(torch_device)
result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_double_lm_head_model(
self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, *args
):
model = GPT2DoubleHeadsModel(config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
inputs = {
"input_ids": multiple_choice_inputs_ids,
"mc_token_ids": mc_token_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
"labels": multiple_choice_inputs_ids,
}
result = model(**inputs)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_choices, self.seq_length, self.vocab_size)
)
self.parent.assertEqual(result.mc_logits.shape, (self.batch_size, self.num_choices))
def create_and_check_gpt2_for_sequence_classification(
self, config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, *args
):
config.num_labels = self.num_labels
model = GPT2ForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
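# Note on the `past` caching contract exercised by the checks above (an
# illustrative sketch, not part of the test suite): with use_cache=True the
# model returns its key/value states, and feeding them back via `past` lets a
# follow-up call embed only the new tokens while still attending over the full
# prefix, e.g.
#   output, past = model(input_ids, use_cache=True).to_tuple()
#   next_output = model(next_tokens, past=past)["last_hidden_state"]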
@require_torch
class GPT2ModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel, GPT2ForSequenceClassification)
if is_torch_available()
else ()
)
all_generative_model_classes = (GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else ()
test_missing_keys = False
def setUp(self):
self.model_tester = GPT2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_gpt2_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model(*config_and_inputs)
def test_gpt2_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model_past(*config_and_inputs)
def test_gpt2_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model_attention_mask_past(*config_and_inputs)
def test_gpt2_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_model_past_large_inputs(*config_and_inputs)
def test_gpt2_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_gpt2_double_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
def test_gpt2_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_gpt2_for_sequence_classification(*config_and_inputs)
def test_gpt2_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs(gradient_checkpointing=True)
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs)
@slow
def test_batch_generation(self):
model = GPT2LMHeadModel.from_pretrained("gpt2")
model.to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.padding_side = "left"
# Define PAD Token = EOS Token = 50256
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id
# use different length sentences to test batching
sentences = [
"Hello, my dog is a little",
"Today, I",
]
inputs = tokenizer(sentences, return_tensors="pt", padding=True)
torch.manual_seed(0)
outputs = model.generate(
input_ids=inputs["input_ids"].to(torch_device),
attention_mask=inputs["attention_mask"].to(torch_device),
)
inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
output_non_padded = model.generate(input_ids=inputs_non_padded)
num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
expected_output_sentence = [
"Hello, my dog is a little bit of a mess. I'm not sure if he's going",
"Today, I'm going to be doing a lot of research on this. I",
]
self.assertListEqual(expected_output_sentence, batch_out_sentence)
self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
def test_model_from_pretrained(self):
for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = GPT2Model.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
class GPT2ModelLanguageGenerationTest(unittest.TestCase):
@slow
def test_lm_generate_gpt2(self):
for checkpointing in [True, False]:
model = GPT2LMHeadModel.from_pretrained("gpt2", gradient_checkpointing=checkpointing)
model.to(torch_device)
input_ids = torch.tensor([[464, 3290]], dtype=torch.long, device=torch_device) # The dog
expected_output_ids = [
464,
3290,
373,
1043,
287,
257,
2214,
1474,
262,
16246,
286,
2688,
290,
2688,
27262,
13,
198,
198,
464,
3290,
] # The dog was found in a field near the intersection of West and West Streets.\n\nThe dog
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
@slow
def test_lm_generate_distilgpt2(self):
model = GPT2LMHeadModel.from_pretrained("distilgpt2")
model.to(torch_device)
input_ids = torch.tensor([[464, 1893]], dtype=torch.long, device=torch_device) # The president
expected_output_ids = [
464,
1893,
286,
262,
1578,
1829,
11,
290,
262,
1893,
286,
262,
1578,
7526,
11,
423,
587,
287,
262,
2635,
] # The president of the United States, and the president of the United Kingdom, have been in the White
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| ["torch.cat", "torch.ones", "torch.manual_seed", "torch.tensor", "torch.allclose"] | 1.0 | katarinaslama/transformers-1 | a5a8eeb772b185b0746f3ce9be6ae43181d2ca71 |
1.9 | from logging import getLogger
from typing import Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.onnx import export
logger = getLogger(__name__)
def export_to_onnx(
model: nn.Module,
dummy_input: Union[Tuple, torch.Tensor],
file,
opset_version: int = 12,
input_names: Optional[List[str]] = None,
output_names: Optional[List[str]] = None,
dynamic_axes: Optional[Dict[str, Dict[int, str]]] = None
) -> None:
"""Exports PyTorch model to ONNX format.
Args:
model: PyTorch module.
dummy_input: Dummy input.
file: Path to save converted model or file-like object.
opset_version: Version of ONNX operator set. Defaults to 12.
input_names: Names of model inputs. Defaults to None.
output_names: Names of model outputs. Defaults to None.
dynamic_axes: Axes of inputs and/or outputs that should be treated
as dynamic (variable-sized). Defaults to None.
Examples:
>>> from transformers import AutoModel, AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
>>> model = AutoModel.from_pretrained('bert-base-uncased')
>>> encoded = tokenizer('aboba', return_tensors='pt')
>>>
>>> export_to_onnx(
>>> model,
>>> dummy_input=tuple(encoded.values()),
>>> file='model.onnx',
>>> input_names=list(encoded.keys()),
>>> output_names=['last_hidden_state', 'pooler_output'],
>>> dynamic_axes={
>>> 'input_ids' : {0 : 'batch_size', 1: 'seq'},
>>> 'token_type_ids' : {0 : 'batch_size', 1: 'seq'},
>>> 'attention_mask' : {0 : 'batch_size', 1: 'seq'},
>>> 'last_hidden_state' : {0 : 'batch_size', 1: 'seq'},
>>> 'pooler_output' : {0 : 'batch_size', 1: 'seq'}
>>> }
>>> )
"""
model.eval()
export(
model,
dummy_input,
file,
opset_version=opset_version,
do_constant_folding=True,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes
)
logger.info('Model was exported to ONNX.')
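# Optional sanity check for an exported graph (a minimal sketch, not part of
# the original module): `onnxruntime` is assumed to be installed, and `feed`
# maps the declared input names to numpy arrays.
def _check_onnx_model(path, feed):
    """Hypothetical helper: run the exported model once and return all outputs."""
    import onnxruntime as ort  # optional dependency, assumed available

    session = ort.InferenceSession(path)
    return session.run(None, feed)  # None fetches every declared output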
| ["torch.onnx.export"] | 1.9.0 | esceptico/squeezer | 98bc4c7923c6aa3b12ac81444d79392826fc34c6 |
1.7 | import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
from clinicaldg.eicu.data_extraction.data_extraction_mortality import data_extraction_mortality
import clinicaldg.eicu.Constants as Constants
from sklearn.preprocessing import StandardScaler, LabelEncoder
from torch.utils.data import ConcatDataset, Dataset
hospitals = pd.read_csv((Constants.eicu_dir/'hospital.csv'))
hospitals['region'] = hospitals['region'].fillna('Missing')
patients = pd.read_csv((Constants.eicu_dir/'patient.csv'))[['patientunitstayid', 'hospitalid', 'gender']]
class LabelEncoderExt(object):
'''
Label encoder that encodes any label unseen during fit (e.g. on the test set) as "Missing"
'''
def __init__(self):
self.label_encoder = LabelEncoder()
def fit(self, data_list):
self.label_encoder = self.label_encoder.fit(list(map(str, list(data_list))) + ['Missing'])
self.classes_ = self.label_encoder.classes_
return self
def transform(self, data_list):
data_list = list(map(str, list(data_list)))
for unique_item in np.unique(data_list):
if unique_item not in self.label_encoder.classes_:
data_list = ['Missing' if x==unique_item else x for x in data_list]
return self.label_encoder.transform(data_list)
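# Behaviour sketch (hypothetical label values): unseen test-set labels fall
# back to the 'Missing' class instead of raising, e.g.
#   enc = LabelEncoderExt().fit(['Female', 'Male'])
#   enc.transform(['Male', 'Other'])  # 'Other' is unseen -> encoded as 'Missing'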
class AugmentedDataset():
def __init__(self, augs = [], train_pct = 0.7, val_pct = 0.1):
self.reg_mort, self.reg_pat, self.scalers, self.labelencoders = self._get_mortality_data(train_pct, val_pct)
for a in augs:
a.augment(self.reg_mort, self.reg_pat)
def get_torch_dataset(self, envs, dset):
'''
envs: a list of region names
dset: one of ['train', 'val', 'test']. For the test environment, use "test" for dset
'''
datasets = []
for r in envs:
datasets.append(eICUDataset(self.reg_mort[r][self.reg_mort[r]['fold'] == dset], self.reg_pat[r][self.reg_pat[r]['fold'] == dset]))
return ConcatDataset(datasets)
def get_num_levels(self):
return ({i: len(self.labelencoders[i].classes_) for i in Constants.ts_cat_features},
{i: len(self.labelencoders[i].classes_) for i in Constants.static_cat_features},
)
def _get_mortality_data(self, train_pct, val_pct):
mort_df = data_extraction_mortality(str(Constants.benchmark_dir))
targets = mort_df.groupby('patientunitstayid').agg({'hospitaldischargestatus': 'first'}).reset_index()
pat_df = pd.merge(patients, hospitals, on = 'hospitalid', how = 'left')
pat_df = pd.merge(pat_df, targets, on = 'patientunitstayid', how = 'inner').rename(columns = {'hospitaldischargestatus': 'target'})
pat_df = pat_df[pat_df.patientunitstayid.isin(mort_df.patientunitstayid)].sample(frac = 1) # shuffle
pat_df['fold'] = ''
pat_df['fold'].iloc[:int(len(pat_df)*train_pct)] = 'train'
pat_df['fold'].iloc[int(len(pat_df)*train_pct):int(len(pat_df)*(train_pct + val_pct))] = 'val'
pat_df['fold'].iloc[int(len(pat_df)*(train_pct + val_pct)):] = 'test'
mort_df = mort_df.merge(pat_df[['patientunitstayid', 'fold']], on = 'patientunitstayid')
# make sure every stay has exactly 48 hours of data
## make multiindex with 48h
## groupby and ffill
## fill any remaining missing features with normal_values
iterables = [np.unique(mort_df['patientunitstayid']), list(range(1, mort_df.itemoffset.max()+1))]
multiind = pd.MultiIndex.from_product(iterables, names = ['patientunitstayid', 'itemoffset'])
ind_df = pd.DataFrame(index = multiind)
mort_df = pd.merge(ind_df, mort_df, left_index = True, right_on = ['patientunitstayid', 'itemoffset'], how = 'left')
mort_df = mort_df.set_index(['patientunitstayid', 'itemoffset']).sort_index().groupby('patientunitstayid').ffill()
for back_col in ['hospitaldischargestatus', 'fold'] + Constants.static_cont_features + Constants.static_cat_features:
mort_df[back_col] = mort_df[back_col].fillna(method = 'backfill')
for feat, val in Constants.normal_values.items():
mort_df[feat] = mort_df[feat].fillna(val)
# scale continuous and static ts features
scalers = {}
for feat in Constants.ts_cont_features + Constants.static_cont_features:
scalers[feat] = StandardScaler().fit(mort_df.loc[mort_df.fold == 'train', feat].values.reshape(-1, 1))
mort_df[feat] = scalers[feat].transform(mort_df[feat].values.reshape(-1, 1))[:, 0]
# encode continuous and static cat features
labelencoders, num_encodings = {}, {}
for feat in Constants.ts_cat_features + Constants.static_cat_features:
mort_df[feat] = mort_df[feat].fillna('Missing')
labelencoders[feat] = LabelEncoderExt().fit(mort_df.loc[mort_df.fold == 'train', feat])
mort_df[feat] = labelencoders[feat].transform(mort_df[feat])
num_encodings[feat] = len(labelencoders[feat].classes_)
reg_mort, reg_pat = {}, {}
for reg in pat_df.region.unique():
sub_pat = pat_df[pat_df.region == reg]
sub = mort_df[mort_df.index.get_level_values(0).isin(sub_pat.patientunitstayid)]
reg_mort[reg] = sub
reg_pat[reg] = sub_pat.set_index('patientunitstayid')
return reg_mort, reg_pat, scalers, labelencoders
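# Usage sketch (region names here are hypothetical; the actual values come
# from the 'region' column of hospital.csv):
#   data = AugmentedDataset()
#   train_ds = data.get_torch_dataset(['South', 'West'], 'train')
#   test_ds = data.get_torch_dataset(['Midwest'], 'test')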
class eICUDataset(Dataset):
def __init__(self, mort_df, pat_df):
self.mort_df = mort_df
self.pat_df = pat_df
def __len__(self):
return self.pat_df.shape[0]
def __getitem__(self, idx):
pat_id = self.pat_df.index[idx]
mort_data = self.mort_df.loc[pat_id]
ts_cont_feats = mort_data[Constants.ts_cont_features].values
ts_cat_feats = mort_data[Constants.ts_cat_features].values
static_not_in_mort = [i for i in Constants.static_cont_features if i not in self.mort_df]
static_in_mort = [i for i in Constants.static_cont_features if i in self.mort_df]
static_cont_feats = np.concatenate((mort_data[static_in_mort].iloc[0].values, self.pat_df.loc[pat_id, static_not_in_mort].values)).astype(float)
static_cat_feats = mort_data[Constants.static_cat_features].iloc[0].values
return ({'pat_id': pat_id,
'ts_cont_feats': ts_cont_feats,
'ts_cat_feats': ts_cat_feats,
'static_cont_feats': static_cont_feats,
'static_cat_feats': static_cat_feats,
'gender': int(self.pat_df.loc[pat_id, 'gender'].strip() == 'Male')},
self.pat_df.loc[pat_id, 'target'])
| ["torch.utils.data.ConcatDataset"] | 1.7.0 | MLforHealth/ClinicalDG | 2de4a8e155231f07d80036504a6f49b50004654e |
1.4 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
import os
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import fairscale.optim as optim
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO # type: ignore
DEVICE = "cuda" if torch.cuda.is_available() else torch.device("cpu")
def setup_module(module):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29500"
dist.init_process_group(backend=BACKEND, rank=0, world_size=1)
def dist_init(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29501"
dist.init_process_group(backend=BACKEND, rank=rank, world_size=world_size)
def test_create():
params = [torch.rand(1)]
o = optim.OSS(params, lr=0.01)
def test_state_dict():
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.1, momentum=0.9)
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
o.zero_grad()
o.consolidate_state_dict() # Sync state dict in between replicas - even if there are none
state_dict = o.state_dict()
# Check that the state dict is pytorch-compliant key wise
assert "param_groups" in state_dict.keys()
assert "state" in state_dict.keys()
# Check that the pulled state is what we expect, and that we have all the expected keys
assert state_dict["param_groups"][0]["lr"] == 0.1
assert state_dict["param_groups"][0]["momentum"] == 0.9
assert not state_dict["param_groups"][0]["nesterov"]
assert state_dict["param_groups"][0]["weight_decay"] == 0.0
assert state_dict["param_groups"][0]["dampening"] == 0.0
# Check that the pulled state and the .param_groups attribute are in sync
for k in state_dict["param_groups"][0].keys():
if k != "params":
assert state_dict["param_groups"][0][k] == o.param_groups[0][k]
# Check that it's correctly loaded
o = optim.OSS([x], lr=0.01)
o.load_state_dict(state_dict)
# Check that state is correct and on proper device
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.0], device=DEVICE)
# We should now be using a lr of 0.1, both within the optimizer
# and as exposed by the .param_groups attribute
assert o.param_groups[0]["lr"] == 0.1
x.backward()
o.step()
assert x == torch.tensor([0.71], device=DEVICE)
assert o.optim.state[x]["momentum_buffer"] == torch.tensor([1.9], device=DEVICE)
# Check that the exposed param_groups are on the proper device
assert o.param_groups[0]["params"][0].device == x.device
def test_lr_scheduler():
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
x2 = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.01)
o2 = torch.optim.SGD([x2], lr=0.01)
s = torch.optim.lr_scheduler.StepLR(o, 1)
s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
for _ in range(5):
x.backward()
o.zero_grad()
o.step()
s.step()
x2.backward()
o2.zero_grad()
o2.step()
s2.step()
assert x == x2
def test_step_with_kwargs():
class SGDWithStepKWArg(torch.optim.SGD):
def step(self, closure=None, kwarg=[]):
super().step()
kwarg.append(5)
kwarg = []
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], SGDWithStepKWArg, lr=0.1)
x.backward()
o.step(0, kwarg=kwarg)
assert kwarg == [5]
assert x == torch.tensor([0.9], device=DEVICE)
def test_step_without_closure():
class SGDWithoutClosure(torch.optim.SGD):
def step(self):
return super().step()
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], SGDWithoutClosure, lr=0.1)
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
def test_local_state_dict():
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.1)
local_state_dict = o.local_state_dict()
o = optim.OSS([x], lr=0.01)
o.load_local_state_dict(local_state_dict)
# We should now be using a lr of 0.1.
assert o.optim.param_groups[0]["lr"] == 0.1
assert o.param_groups[0]["lr"] == 0.1
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
def test_implicit_local_state_dict():
x = torch.tensor([1.0], device=DEVICE, requires_grad=True)
o = optim.OSS([x], lr=0.1)
local_state_dict = o.state_dict()
o = optim.OSS([x], lr=0.01)
o.load_state_dict(local_state_dict)
# We should now be using a lr of 0.1.
assert o.optim.param_groups[0]["lr"] == 0.1
assert o.param_groups[0]["lr"] == 0.1
x.backward()
o.step()
assert x == torch.tensor([0.9], device=DEVICE)
def run_test_add_param_group(rank, world_size):
dist_init(rank, world_size)
params = []
for size in [4, 5, 2, 6, 4]:
params.append(torch.rand(size, 1))
o = optim.OSS(params, lr=0.1)
assert len(o.param_groups) == 1
o.add_param_group({"params": [torch.rand(3, 1)]})
assert len(o.param_groups) == 2
# Verify that added group is added to the correct partition making all have 8 elements.
assert sum([x.numel() for g in o.optim.param_groups for x in g["params"]]) == 8
assert len(o.optim.param_groups) == 2
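# Worked arithmetic behind the assertion above (illustration only): the initial
# parameter sizes sum to 4 + 5 + 2 + 6 + 4 = 21 elements, the added group
# contributes 3 more for 24 total, and with world_size = 3 each rank's shard
# ends up with 24 / 3 = 8 elements.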
def test_add_param_group():
world_size = 3
mp.spawn(run_test_add_param_group, args=(world_size,), nprocs=world_size, join=True)
def run_test_zero_grad(rank, world_size):
dist_init(rank, world_size)
x = torch.rand(1)
m = torch.nn.Linear(1, 1)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
assert m.weight.grad
assert m.bias.grad
o.zero_grad()
assert not m.weight.grad
assert not m.bias.grad
def test_zero_grad():
world_size = 2
mp.spawn(run_test_zero_grad, args=(world_size,), nprocs=world_size, join=True)
def run_test_step(rank, world_size):
dist_init(rank, world_size)
x = torch.tensor([float(rank + 1)], device=rank)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[1.0]])
m.bias.data = torch.tensor([2.0])
m.to(rank)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= world_size
o.step()
assert m.weight == torch.tensor([[0.75]], device=rank)
assert m.bias == torch.tensor([1.85], device=rank)
@skip_if_no_cuda
def test_step():
world_size = min(2, torch.cuda.device_count())
mp.spawn(run_test_step, args=(world_size,), nprocs=world_size, join=True)
def run_test_step_with_closure(rank, world_size, optimizer=None):
dist_init(rank, world_size)
x_val = rank + 1
weight = 1.0
bias = 2.0
error = 1.0
target = torch.tensor([x_val * weight + bias + error], device=rank)
loss_fn = torch.nn.L1Loss()
x = torch.tensor([float(x_val)], device=rank)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[weight]])
m.bias.data = torch.tensor([bias])
m.to(rank)
o = optim.OSS(m.parameters(), lr=0.1)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= world_size
def closure():
o.zero_grad()
output = m(x)
loss = loss_fn(output, target)
loss.backward()
return loss
loss = o.step(closure=closure)
assert loss == torch.tensor(error, device=rank)
assert m.weight == torch.tensor([[1.1]], device=rank)
assert m.bias == torch.tensor([2.1], device=rank)
@skip_if_no_cuda
def test_step_with_closure():
world_size = min(2, torch.cuda.device_count())
mp.spawn(run_test_step_with_closure, args=(world_size,), nprocs=world_size, join=True)
def run_test_sharding(rank, world_size):
dist_init(rank, world_size)
params = []
for size in [5, 4, 2, 6, 4, 3]:
params.append(torch.rand(size, 1))
o = optim.OSS(params, lr=0.1)
assert sum([x.numel() for x in o.optim.param_groups[0]["params"]]) == 8
def test_sharding():
world_size = 3
mp.spawn(run_test_sharding, args=(world_size,), nprocs=world_size, join=True)
def run_test_collect_shards(rank, world_size, reference_rank):
dist_init(rank, world_size)
device = torch.device(rank) if torch.cuda.device_count() > 1 else DEVICE
# Run a dummy step so that the optimizer state dict exists
batch, input_width, hidden, target_width = 3, 20, 10, 5
target = torch.rand((batch, target_width), device=device)
inputs = torch.rand((batch, input_width), device=device)
model = torch.nn.Sequential(torch.nn.Linear(input_width, hidden), torch.nn.Linear(hidden, target_width))
model.to(device)
loss_fn = torch.nn.L1Loss()
loss_fn.to(device)
# With SGD, Momentum is required to get a state to shard
optimizer = optim.OSS(model.parameters(), lr=0.1, momentum=0.99)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss.backward()
return loss
_ = optimizer.step(closure=closure)
# Update the optimizer state on the reference rank
optimizer.consolidate_state_dict(recipient_rank=reference_rank)
# Fetch the state on the reference rank
# - check that it has the correct size
# - load it again
if rank == reference_rank:
optimizer_state_dict = optimizer.state_dict()
assert len(optimizer_state_dict["state"]) == world_size
else:
optimizer_state_dict = {}
optimizer_state_dict = optim.utils.broadcast_object(
optimizer_state_dict, src_rank=reference_rank, group=dist.group.WORLD, dist_device=device
)
# Load the optimizer state dict
optimizer.load_state_dict(optimizer_state_dict)
def test_collect_shards():
world_size = 3
if torch.cuda.is_available():
world_size = min(world_size, torch.cuda.device_count())
reference_rank = 0
mp.spawn(
run_test_collect_shards, args=(world_size, reference_rank), nprocs=world_size, join=True,
)
| ["torch.nn.Linear", "torch.device", "torch.rand", "torch.optim.lr_scheduler.StepLR", "torch.distributed.init_process_group", "torch.optim.SGD", "torch.multiprocessing.spawn", "torch.nn.L1Loss", "torch.cuda.device_count", "torch.cuda.is_available", "torch.tensor", "torch.distributed.all_reduce"] | 1.4.0 | joshim5/fairscale | 1c2a6f6b46646866f3e86d628b8a4ca437f68215 |
1.8 | import torch
import torch.nn as nn
from utils import binary_accuracy
from sklearn.metrics import precision_recall_fscore_support
import numpy as np
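# Debug switch: when True, the loops below stop after the first batch.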
only_one_iteration = False
def train(model, iterator, optimizer, criterion, device):
epoch_loss = 0
epoch_acc = 0
precision = 0
recall = 0
f1 = 0
model.train()
for i, batch in enumerate(iterator):
optimizer.zero_grad()
predictions = model(batch.text, device).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
loss.backward()
optimizer.step()
epoch_loss += float(loss.item())
epoch_acc += float(acc.item())
pr_epoch, rec_epoch, f1_epoch, _ = precision_recall_fscore_support(batch.label.cpu().detach().numpy(), torch.round(torch.sigmoid(predictions)).cpu().detach().numpy(),average='macro')
precision += pr_epoch
recall += rec_epoch
f1 += f1_epoch
if only_one_iteration:
break
return epoch_loss / len(iterator), epoch_acc / len(iterator), precision / len(iterator), recall / len(iterator), f1 / len(iterator)
def evaluate(model, iterator, criterion, device):
epoch_loss = 0
epoch_acc = 0
precision = 0
recall = 0
f1 = 0
model.eval()
with torch.no_grad():
for batch in iterator:
predictions = model(batch.text, device).squeeze(1)
loss = criterion(predictions, batch.label)
acc = binary_accuracy(predictions, batch.label)
epoch_loss += loss.item()
epoch_acc += acc.item()
pr_epoch, rec_epoch, f1_epoch, _ = precision_recall_fscore_support(batch.label.cpu().detach().numpy(), torch.round(torch.sigmoid(predictions)).cpu().detach().numpy(),average='micro')
precision += pr_epoch
recall += rec_epoch
f1 += f1_epoch
if only_one_iteration:
break
return epoch_loss / len(iterator), epoch_acc / len(iterator), precision / len(iterator), recall / len(iterator), f1 / len(iterator)
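# Typical driver loop for the two helpers above (a sketch; `model`, the
# iterators, and `binary_accuracy` are assumed to be built elsewhere, with
# batches exposing .text and .label as in torchtext):
#   optimizer = torch.optim.Adam(model.parameters())
#   criterion = nn.BCEWithLogitsLoss()
#   for epoch in range(n_epochs):
#       train_stats = train(model, train_iterator, optimizer, criterion, device)
#       valid_stats = evaluate(model, valid_iterator, criterion, device)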
| ["torch.sigmoid", "torch.no_grad"] | 1.8.1 | trusthlt/dp-across-nlp-tasks | ec3e03511420044cdb0bb1a3574925d354ff03f4 |
1.5 | import os
import time
import ujson as json
import torch
import sys
import pickle
import numpy as np
from torch.utils.data import Dataset
import torch.distributed as dist
import torch.nn.functional as F
from bootleg.symbols.alias_entity_table import AliasEntityTable
from bootleg.symbols.constants import *
from bootleg.prep import prep_data
from bootleg.utils import logging_utils, data_utils, train_utils
from bootleg.utils.utils import import_class
from bootleg.utils import utils
# https://github.com/pytorch/pytorch/issues/37581#issuecomment-624516586
import warnings
warnings.filterwarnings("ignore", message=".*The given NumPy array is not writeable.*")
class WikiDataset(Dataset):
"""
Main dataset class that handles preparing a batch of input.
Things to note
**Input is a sentence with mentions that are both true and false golds. A true gold is one that was directly
mined from Wikipedia. A false gold is one that was generated by weak labelling.
**We determine whether an entity is in a slice by whether its true entity index is -1. During train, if use_weak_label is true,
we allow the model to leverage both true and false golds. During eval, we only score true anchors.
**Some embeddings require more expensive processing, e.g., extracting the pairs of candidate entities that are connected
in a KG. When this processing is done in the dataloader, where it can benefit from multiple dataloader threads,
the embedding is stored in batch_on_the_fly; such an embedding must have a batch_prep method.
When this processing is done during data prep, the embedding is stored in batch_prep.
**If training a NIL model, we support randomly removing the true entity from the candidate list and setting the true
entity index to be the NIL entity.
**We support data slices (subsets of data) for both training (if slice model) and eval. If using slices for training a model,
we support probabilistic slice indices.
Attributes:
batch_prepped_emb_file_names: embeddings that are batch prepped in advance
batch_on_the_fly_embs: embeddings whose batch_prep method is called in the __getitem__ method
random_nil: whether to do NIL candidate random generation
Batch Inputs:
start_idx_in_sent: first token index of a mention,
end_idx_in_sent: last token index of a mention,
alias_idx: the alias (mention) index in our alias dictionary,
word_indices: word indices into the word embeddings (e.g., BERT token indices),
sent_idx: unique sentence index,
subsent_idx: unique subsentence index in the case of sentence windowing,
entity_indices: the entity indices in our entity dictionary,
alias_list_pos: keeps track of the original alias position in the list of all aliases in case the sentence
is split via windowing
true_entity_idx_for_train: entity indices for true and false golds, as seen during train
slice_indices (optional): if slice dataset, we pass in a matrix where each row is an alias and each column
is 0/1 for whether that mention is in the slice
<ind_task_name> (optional): probabilistic labels of whether a mention is in a slice (used in slicing model)
<pred_task_name>: NED prediction labels; for a slice model, predictions of aliases not in the slice are masked
<embs>: all batch prep or batch on the fly embeddings
"""
def __init__(self, args, use_weak_label, input_src, dataset_name,
is_writer, distributed, word_symbols, entity_symbols,
slice_dataset=None, dataset_is_eval=False):
# Need to save args to reinstantiate logger
self.args = args
self.logger = logging_utils.get_logger(args)
# Number of candidates, including NIL if a NIL model (train_in_candidates is False)
self.K = entity_symbols.max_candidates + (not args.data_config.train_in_candidates)
self.num_entities_with_pad_and_nocand = entity_symbols.num_entities_with_pad_and_nocand
self.dataset_name = dataset_name
self.slice_dataset = slice_dataset
self.dataset_is_eval = dataset_is_eval
# Slice names used for eval slices and a slicing model
self.slice_names = train_utils.get_data_slices(args, dataset_is_eval)
self.storage_type_file = data_utils.get_storage_file(self.dataset_name)
# Mappings from sent_idx to row_id in dataset
self.sent_idx_file = os.path.splitext(dataset_name)[0] + "_sent_idx.json"
self.type_pred = False
if args.data_config.type_prediction.use_type_pred:
self.type_pred = True
self.eid2typeid, self.num_types_with_pad = self.load_coarse_type_table(args, entity_symbols)
# Load memory mapped file
self.logger.info("Loading dataset...")
self.logger.debug("Seeing if " + dataset_name + " exists")
if (args.data_config.overwrite_preprocessed_data or
(not os.path.exists(self.dataset_name)) or
(not os.path.exists(self.sent_idx_file)) or
(not os.path.exists(self.storage_type_file)) or
(not os.path.exists(data_utils.get_batch_prep_config(self.dataset_name)))):
start = time.time()
self.logger.debug(f"Building dataset with {input_src}")
# Only prep data once per node
if is_writer:
prep_data(args, use_weak_label=use_weak_label, dataset_is_eval=self.dataset_is_eval,
input_src=input_src, dataset_name=dataset_name,
prep_dir=data_utils.get_data_prep_dir(args))
if distributed:
# Make sure all processes wait for data to be created
dist.barrier()
self.logger.debug(f"Finished building and saving dataset in {round(time.time() - start, 2)}s.")
start = time.time()
# Storage type for loading memory mapped file of dataset
self.storage_type = pickle.load(open(self.storage_type_file, 'rb'))
self.data = np.memmap(self.dataset_name, dtype=self.storage_type, mode='r')
self.data_len = len(self.data)
# Mapping from sentence idx to rows in the dataset (indices).
# Needed when sampling sentence indices from slices for evaluation.
sent_idx_to_idx_str = utils.load_json_file(self.sent_idx_file)
self.sent_idx_to_idx = {int(i):val for i,val in sent_idx_to_idx_str.items()}
self.logger.info(f"Finished loading dataset.")
# Stores info about the batch prepped embedding memory mapped files and their shapes and datatypes
# so we can load them
self.batch_prep_config = utils.load_json_file(data_utils.get_batch_prep_config(self.dataset_name))
self.batch_prepped_emb_files = {}
self.batch_prepped_emb_file_names = {}
for emb in args.data_config.ent_embeddings:
if 'batch_prep' in emb and emb['batch_prep']:
assert emb.key in self.batch_prep_config, f'Need to prep {emb.key}. Please call prep instead of run with batch_prep_embeddings set to true.'
self.batch_prepped_emb_file_names[emb.key] = os.path.join(os.path.dirname(self.dataset_name),
os.path.basename(self.batch_prep_config[emb.key]['file_name']))
self.batch_prepped_emb_files[emb.key] = np.memmap(
self.batch_prepped_emb_file_names[emb.key],
dtype=self.batch_prep_config[emb.key]['dtype'],
shape=tuple(self.batch_prep_config[emb.key]['shape']),
mode='r')
assert len(self.batch_prepped_emb_files[emb.key]) == self.data_len,\
f'Preprocessed emb data file {self.batch_prep_config[emb.key]["file_name"]} does not match length of main data file.'
# Stores embeddings that we compute on the fly; these are embeddings where batch_on_the_fly is set to true.
self.batch_on_the_fly_embs = {}
for emb in args.data_config.ent_embeddings:
if 'batch_on_the_fly' in emb and emb['batch_on_the_fly'] is True:
mod, load_class = import_class("bootleg.embeddings", emb.load_class)
try:
self.batch_on_the_fly_embs[emb.key] = getattr(mod, load_class)(main_args=args,
emb_args=emb['args'], entity_symbols=entity_symbols,
model_device=None, word_symbols=None, key=emb.key)
except AttributeError as e:
self.logger.warning(f'No prep method found for {emb.load_class} with error {e}')
except Exception as e:
print("ERROR", e)
# The data in this table shouldn't be pickled since we delete it in the class __getstate__
self.alias2entity_table = AliasEntityTable(args=args, entity_symbols=entity_symbols)
# Random NIL percent
self.mask_perc = args.train_config.random_nil_perc
self.random_nil = False
# Don't want to random mask for eval
if not dataset_is_eval:
# Whether to use a random NIL training regime
self.random_nil = args.train_config.random_nil
if self.random_nil:
self.logger.info(f'Using random nils during training with {self.mask_perc} percent')
def __len__(self):
return self.data_len
def __getitem__(self, key):
# start = time.time()
example = self.data[key]
entity_indices = self.alias2entity_table(example['alias_idx'])
# True entities will be true and false golds for train (if use_weak_label in config is true) and just true golds for eval
true_entities = torch.from_numpy(example['true_entity_idx'])
M = true_entities.shape
if self.random_nil:
# example['true_entity_idx'] is M -> we want to sample some % of these and set them to not in candidate list
# randomly mask each entity embedding
bern_prob = (torch.ones(M) * self.mask_perc)
keep_mask = torch.bernoulli(bern_prob) < 1
# whichever we sample, we want to set corresponding true candidate to -1 and mask it out
# to simulate not being in the candidate list
# can't have negatives for one hot so we temporarily cast padded values to 0
padded_entities = true_entities == -1
true_entities = true_entities.masked_fill(padded_entities, 0)
one_hot_true_entities = F.one_hot(true_entities, num_classes=self.K)
one_hot_true_entities[keep_mask.unsqueeze(-1).expand_as(one_hot_true_entities)] = 0
one_hot_true_entities[padded_entities.unsqueeze(-1).expand_as(one_hot_true_entities)] = 0
entity_indices = entity_indices.masked_fill(one_hot_true_entities.bool(), -1)  # masked_fill requires a boolean mask
# set new true label to 0 ('not in candidate')
true_entities = true_entities.masked_fill(~keep_mask, 0)
# make sure original padded entities are padded
true_entities = true_entities.masked_fill(padded_entities, -1)
start_idx_in_sent = example['start_idx_in_sent']
end_idx_in_sent = example['end_idx_in_sent']
example_dict = {'start_idx_in_sent': start_idx_in_sent,
'end_idx_in_sent': end_idx_in_sent,
'alias_idx': example['alias_idx'],
'word_indices': example['word_indices'],
'sent_idx': example['sent_idx'],
'subsent_idx': example['subsent_idx'],
'entity_indices': entity_indices,
# due to subsentence split, we need to keep track of the original alias position in the list
# to do eval over slices when distributed
# (examples from a sentence may be distributed across different GPUs)
'alias_list_pos': example['alias_list_pos'],
# true entities of the mentions seen during train (true and false golds); in eval, we only keep
# true entities of true golds
'true_entity_idx_for_train': example['true_entity_idx_for_train']}
# If this dataset is associated with slices, slice_indices is a incidence matrix indicating
# for each alias in the batch, which ones participate in which slice (slices keep track of sentence indexes and aliases to predict)
# Slices are not windowed like that are for training data.
if self.slice_dataset is not None:
# -1 is pad and should not be in the mapping from sentence index to row in array.
assert -1 != self.slice_dataset.sent_idx_arr[example["sent_idx"]]
# One row per mention and one column per slice
slice_indices = np.hstack([self.slice_dataset.data[slice_name][self.slice_dataset.sent_idx_arr[example["sent_idx"]]].alias_to_predict.T
for slice_name in self.slice_names])
prob_labels_arr = np.hstack([self.slice_dataset.data[slice_name][self.slice_dataset.sent_idx_arr[example["sent_idx"]]].prob_labels.T
for slice_name in self.slice_names])
# alias_list_pos will have -1 for no alias; we want these to become zero in slice_indices.
# Therefore we add a pad row to the bottom of slice_indices
slice_indices = np.vstack([slice_indices, np.zeros(slice_indices.shape[1])]).astype(int)
slice_indices = slice_indices[example['alias_list_pos']]
# Probabilistic slice labels for slice indicator head training
prob_labels_arr = np.vstack([prob_labels_arr, np.zeros(prob_labels_arr.shape[1])]).astype(float)
prob_labels_arr = prob_labels_arr[example['alias_list_pos']]
# If this is an eval dataset, keep slice indices intact for eval_wrapper
example_dict['slice_indices'] = slice_indices
# Assign true entity idx to -1 if example alias doesn't participate in slice
for i, slice_name in enumerate(self.slice_names):
prob_labels = prob_labels_arr[:,i]
bin_in_slice_labels = slice_indices[:,i]
# NED prediction labels; set predictions to be -1 for masking for mentions not in a slice
pred_labels = np.copy(true_entities)
pred_labels[~(bin_in_slice_labels).astype(bool)] = -1
# Mask out slice alias labels for which we don't want to make a prediction
# We need to use true_entity_idx to account for subsentences which indicate
# which alias to predict
prob_labels[true_entities == -1] = -1
ind_task_name = train_utils.get_slice_head_ind_name(slice_name)
pred_task_name = train_utils.get_slice_head_pred_name(slice_name)
# Add indicator head and prediction head labels
example_dict[ind_task_name] = prob_labels
example_dict[pred_task_name] = pred_labels
else:
example_dict[train_utils.get_slice_head_pred_name(FINAL_LOSS)] = example['true_entity_idx']
# Add type preds
if self.type_pred:
example_dict["type_labels"] = self.eid2typeid[true_entities]
# Add embeddings to example forward
for emb_name in self.batch_prepped_emb_files:
example_dict[emb_name] = np.asarray(self.batch_prepped_emb_files[emb_name][key])
# Prep the embeddings (this will call the batch_prep method for the embedding)
for emb_name, emb in self.batch_on_the_fly_embs.items():
example_dict[emb_name] = emb.batch_prep(example['alias_idx'], entity_indices)
return example_dict
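# Worked example of the random-NIL branch above (illustration only, made-up
# values): with true_entities = [2, 0, -1], K = 3 and mask_perc = 1.0, every
# non-padded mention is sampled, so its gold candidate's column in
# `entity_indices` is overwritten with -1 (simulating "not in the candidate
# list") and its label is reset to 0 ("not in candidates"), while the padded
# slot's label is restored to -1 and its candidates are left untouched.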
def __getstate__(self):
state = self.__dict__.copy()
# Not picklable
del state['data']
del state['logger']
# the sent_idx mapping is expensive to pickle so remove
# also not needed in dataloader workers so we don't need to setstate for it
del state['sent_idx_to_idx']
del state['batch_prepped_emb_files']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.data = np.memmap(self.dataset_name, dtype=self.storage_type, mode='r')
self.batch_prepped_emb_files = {}
for emb_name, file_name in self.batch_prepped_emb_file_names.items():
self.batch_prepped_emb_files[emb_name] = np.memmap(self.batch_prepped_emb_file_names[emb_name],
dtype=self.batch_prep_config[emb_name]['dtype'],
shape=tuple(self.batch_prep_config[emb_name]['shape']),
mode='r')
self.logger = logging_utils.get_logger(self.args)
def __repr__(self):
return f"Dataset {self.dataset_name}"
def load_coarse_type_table(self, args, entity_symbols):
emb_dir = args.data_config.emb_dir
coarse_type_file = args.data_config.type_prediction.file
with open(os.path.join(emb_dir, coarse_type_file)) as in_f:
# take the first type; UNK type is 0
qid2type = {}
max_type = 0
for k, v in json.load(in_f).items():
if len(v) > 0:
qid2type[k] = v[0]+1
else:
qid2type[k] = 0
max_type = max(max_type, qid2type[k])
# Types in the file are 0-indexed, so 6 types have indices 0-5;
# after the +1 shift for UNK, max_type equals the number of types (5+1 = 6)
assert max_type == args.data_config.type_prediction.num_types,\
f"{args.data_config.type_prediction.num_types} from args.data_config.type_prediction.num_types must match our computed number {max_type}"
# All qids get unk types
values = [0 for _ in range(self.num_entities_with_pad_and_nocand)]
for qid in qid2type:
if entity_symbols.qid_exists(qid):
values[entity_symbols.get_eid(qid)] = qid2type[qid]
# Padded eid gets -1
values[-1] = -1
num_types_with_pad = max_type+1
eid2coarsetype = torch.tensor(values)
return eid2coarsetype, num_types_with_pad
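# Hedged sketch (not part of the original file): `type_labels` above is a plain
# tensor gather on the table returned by load_coarse_type_table, with the
# padded eid (last row) mapping to -1. Toy values only.
def _coarse_type_lookup_sketch():
    eid2coarsetype = torch.tensor([0, 3, 1, 2, -1])  # pad eid is the last row
    true_entities = torch.tensor([1, 4, 2])
    return eid2coarsetype[true_entities]  # tensor([ 3, -1,  1])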
| [
"torch.nn.functional.one_hot",
"torch.from_numpy",
"torch.ones",
"torch.tensor",
"torch.bernoulli",
"torch.distributed.barrier"
] | 1.5.0 | mleszczy/bootleg | 162d74001cdfbbe146753393641d549e0328acb1 |
1.0 | """
"""
from __future__ import division
from torch.optim.optimizer import Optimizer, required
import numpy as np
import torch
from typing import NamedTuple, List
from dataclasses import dataclass
from enum import Enum
from typing import Union, Tuple
# from scipy.sparse.linalg import svds
from scipy.optimize import minimize_scalar
class LayerType(Enum):
CONV = 1
FC = 2
NON_CONV = 3
@dataclass
class LayerMetrics:
rank: float
KG: float
condition: float
@dataclass
class ConvLayerMetrics:
input_channel: LayerMetrics
output_channel: LayerMetrics
class LRMetrics(NamedTuple):
rank_velocity: List[float]
r_conv: List[float]
def EVBMF(Y, sigma2=None, H=None):
"""Implementation of the analytical solution to Empirical Variational
Bayes Matrix Factorization.
This function can be used to calculate the analytical solution to
empirical VBMF.
This is based on the paper and MatLab code by Nakajima et al.:
"Global analytic solution of fully-observed variational Bayesian matrix
factorization."
Notes
-----
If sigma2 is unspecified, it is estimated by minimizing the free
energy.
If H is unspecified, it is set to the smallest of the sides of the
input Y.
Parameters
----------
Y : numpy-array
Input matrix that is to be factorized. Y has shape (L,M), where L<=M.
sigma2 : int or None (default=None)
Variance of the noise on Y.
H : int or None (default = None)
Maximum rank of the factorized matrices.
Returns
-------
U : numpy-array
Left-singular vectors.
S : numpy-array
Diagonal matrix of singular values.
V : numpy-array
Right-singular vectors.
post : dictionary
Dictionary containing the computed posterior values.
References
----------
.. [1] Nakajima, Shinichi, et al. "Global analytic solution of
fully-observed variational Bayesian matrix factorization." Journal of
Machine Learning Research 14.Jan (2013): 1-37.
.. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by
variational Bayesian PCA." Advances in Neural Information Processing
Systems. 2012.
"""
L, M = Y.shape # has to be L<=M
if H is None:
H = L
alpha = L / M
tauubar = 2.5129 * np.sqrt(alpha)
# SVD of the input matrix, max rank of H
# U, s, V = np.linalg.svd(Y)
U, s, V = torch.svd(Y)
U = U[:, :H]
s = s[:H]
V = V[:H].T
# Calculate residual
residual = 0.
if H < L:
# residual = np.sum(np.sum(Y**2)-np.sum(s**2))
residual = torch.sum(Y**2) - torch.sum(s**2)
# Estimation of the variance when sigma2 is unspecified
if sigma2 is None:
xubar = (1 + tauubar) * (1 + alpha / tauubar)
eH_ub = int(np.min([np.ceil(L / (1 + alpha)) - 1, H])) - 1
# upper_bound = (np.sum(s**2)+residual)/(L*M)
# lower_bound = np.max(
# [s[eH_ub+1]**2/(M*xubar), np.mean(s[eH_ub+1:]**2)/M])
upper_bound = (torch.sum(s**2) + residual) / (L * M)
lower_bound = torch.max(torch.stack(
[s[eH_ub + 1]**2 / (M * xubar), torch.mean(s[eH_ub + 1:]**2) / M], dim=0))
scale = 1. # /lower_bound
s = s * np.sqrt(scale)
residual = residual * scale
lower_bound = lower_bound * scale
upper_bound = upper_bound * scale
sigma2_opt = minimize_scalar(
EVBsigma2, args=(L, M, s.cpu().numpy(), residual, xubar),
bounds=[lower_bound.cpu().numpy(), upper_bound.cpu().numpy()],
method='Bounded')
sigma2 = sigma2_opt.x
# Threshold gamma term
threshold = np.sqrt(M * sigma2 * (1 + tauubar) * (1 + alpha / tauubar))
# pos = np.sum(s > threshold)
pos = torch.sum(s > threshold)
# Formula (15) from [2]
# d = torch.multiply(s[:pos]/2,
# 1-torch.divide(
# torch.tensor((L+M)*sigma2, device=s.device),
# s[:pos]**2) + torch.sqrt((1-torch.divide(
# torch.tensor(
# (L+M)*sigma2, device=s.device),
# s[:pos]**2))**2 -
# 4*L*M*sigma2**2/s[:pos]**4))
# d = np.multiply(s[:pos]/2, 1-np.divide((L+M)*sigma2, s[:pos]**2) + np.sqrt(
# (1-np.divide((L+M)*sigma2, s[:pos]**2))**2 - 4*L*M*sigma2**2/s[:pos]**4))
d = (s[:pos] / 2) * (1 - (L + M) * sigma2 / s[:pos]**2
+ torch.sqrt((1 -
(L + M) * sigma2 / s[:pos]**2)**2 - 4 * L * M * sigma2**2 / s[:pos]**4))
# Computation of the posterior
# post = {}
# post['ma'] = np.zeros(H)
# post['mb'] = np.zeros(H)
# post['sa2'] = np.zeros(H)
# post['sb2'] = np.zeros(H)
# post['cacb'] = np.zeros(H)
# tau = np.multiply(d, s[:pos])/(M*sigma2)
# delta = np.multiply(np.sqrt(np.divide(M*d, L*s[:pos])), 1+alpha/tau)
# post['ma'][:pos] = np.sqrt(np.multiply(d, delta))
# post['mb'][:pos] = np.sqrt(np.divide(d, delta))
# post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos])
# post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
# post['cacb'][:pos] = np.sqrt(np.multiply(d, s[:pos])/(L*M))
# post['sigma2'] = sigma2
# post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) +
# (residual+np.sum(s**2))/sigma2 + np.sum(
# M*np.log(tau+1) + L*np.log(tau/alpha + 1) - M*tau))
return U[:, :pos], torch.diag(d), V[:, :pos] # , post
def EVBsigma2(sigma2, L, M, s, residual, xubar):
H = len(s)
alpha = L / M
x = s**2 / (M * sigma2)
z1 = x[x > xubar]
z2 = x[x <= xubar]
tau_z1 = tau(z1, alpha)
term1 = np.sum(z2 - np.log(z2))
term2 = np.sum(z1 - tau_z1)
term3 = np.sum(np.log(np.divide(tau_z1 + 1, z1)))
term4 = alpha * np.sum(np.log(tau_z1 / alpha + 1))
obj = term1 + term2 + term3 + term4 + residual / (M * sigma2) + (L - H) * np.log(sigma2)
return obj
def phi0(x):
return x - np.log(x)
def phi1(x, alpha):
return np.log(tau(x, alpha) + 1) + alpha * np.log(tau(x, alpha) / alpha + 1) - tau(x, alpha)
def tau(x, alpha):
return 0.5 * (x - (1 + alpha) + np.sqrt((x - (1 + alpha))**2 - 4 * alpha))
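# Hedged usage sketch (illustrative only, not from the original file): factor a
# noisy low-rank matrix with EVBMF. The input must satisfy L <= M (rows <= columns).
def _evbmf_sketch():
    torch.manual_seed(0)
    Y = torch.randn(32, 8) @ torch.randn(8, 64) + 0.01 * torch.randn(32, 64)
    U, S, V = EVBMF(Y)
    return S.shape[0]  # estimated rank, close to 8 for this Y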
class Metrics:
def __init__(self, params, linear: bool = False) -> None:
'''
parameters: list of torch.nn.Module.parameters()
'''
self.params = params
self.history = list()
mask = list()
for param_idx, param in enumerate(params):
param_shape = param.shape
if not linear:
if len(param_shape) != 4:
mask.append(param_idx)
else:
if len(param_shape) != 4 and len(param_shape) != 2:
mask.append(param_idx)
self.mask = set(mask)
def compute_low_rank(self,
tensor: torch.Tensor,
normalizer: float) -> torch.Tensor:
if tensor.requires_grad:
tensor = tensor.detach()
try:
tensor_size = tensor.shape
if tensor_size[0] > tensor_size[1]:
tensor = tensor.T
U_approx, S_approx, V_approx = EVBMF(tensor)
except RuntimeError:
return None, None, None
rank = S_approx.shape[0] / tensor_size[0] # normalizer
low_rank_eigen = torch.diag(S_approx).data.cpu().numpy()
if len(low_rank_eigen) != 0:
condition = low_rank_eigen[0] / low_rank_eigen[-1]
sum_low_rank_eigen = low_rank_eigen / max(low_rank_eigen)
sum_low_rank_eigen = np.sum(sum_low_rank_eigen)
else:
condition = 0
sum_low_rank_eigen = 0
KG = sum_low_rank_eigen / tensor_size[0] # normalizer
return rank, KG, condition
def KG(self, epoch: int) -> np.ndarray:
KG_list = list()
for i, (index, metric) in enumerate(self.history[epoch]):
if isinstance(metric, ConvLayerMetrics):
KG_list.append((metric.input_channel.KG
+ metric.output_channel.KG) / 2)
elif isinstance(metric, LayerMetrics):
KG_list.append(metric.KG)
return np.array(KG_list)
def __call__(self) -> List[Tuple[int, Union[LayerMetrics,
ConvLayerMetrics]]]:
'''
Computes the knowledge gain (S) and mapping condition (condition)
'''
metrics: List[Tuple[int, Union[LayerMetrics,
ConvLayerMetrics]]] = list()
for layer_index, layer in enumerate(self.params):
if layer_index in self.mask:
metrics.append((layer_index, None))
continue
# if np.less(np.prod(layer.shape), 10_000):
# metrics.append((layer_index, None))
if len(layer.shape) == 4:
layer_tensor = layer.data
tensor_size = layer_tensor.shape
mode_3_unfold = layer_tensor.permute(1, 0, 2, 3)
mode_3_unfold = torch.reshape(
mode_3_unfold, [tensor_size[1], tensor_size[0]
* tensor_size[2] * tensor_size[3]])
mode_4_unfold = layer_tensor
mode_4_unfold = torch.reshape(
mode_4_unfold, [tensor_size[0], tensor_size[1]
* tensor_size[2] * tensor_size[3]])
in_rank, in_KG, in_condition = self.compute_low_rank(
mode_3_unfold, tensor_size[1])
if in_rank is None and in_KG is None and in_condition is None:
if len(self.history) > 0:
in_rank = self.history[-1][
layer_index][1].input_channel.rank
in_KG = self.history[-1][
layer_index][1].input_channel.KG
in_condition = self.history[-1][
layer_index][1].input_channel.condition
else:
in_rank = in_KG = in_condition = 0.
out_rank, out_KG, out_condition = self.compute_low_rank(
mode_4_unfold, tensor_size[0])
if out_rank is None and out_KG is None and out_condition is None:
if len(self.history) > 0:
out_rank = self.history[-1][
layer_index][1].output_channel.rank
out_KG = self.history[-1][
layer_index][1].output_channel.KG
out_condition = self.history[-1][
layer_index][1].output_channel.condition
else:
out_rank = out_KG = out_condition = 0.
metrics.append((layer_index, ConvLayerMetrics(
input_channel=LayerMetrics(
rank=in_rank,
KG=in_KG,
condition=in_condition),
output_channel=LayerMetrics(
rank=out_rank,
KG=out_KG,
condition=out_condition))))
elif len(layer.shape) == 2:
rank, KG, condition = self.compute_low_rank(
layer, layer.shape[0])
if rank is None and KG is None and condition is None:
if len(self.history) > 0:
rank = self.history[-1][layer_index][1].rank
KG = self.history[-1][layer_index][1].KG
condition = self.history[-1][layer_index][1].condition
else:
rank = KG = condition = 0.
metrics.append((layer_index, LayerMetrics(
rank=rank,
KG=KG,
condition=condition)))
else:
metrics.append((layer_index, None))
self.history.append(metrics)
return metrics
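# Hedged sketch of the two matricizations built in Metrics.__call__ for a conv
# weight of shape (out_c, in_c, kH, kW): mode-3 isolates the input channels,
# mode-4 the output channels.
def _unfold_conv_weight(weight: torch.Tensor):
    out_c, in_c, kh, kw = weight.shape
    mode_3 = torch.reshape(weight.permute(1, 0, 2, 3), [in_c, out_c * kh * kw])
    mode_4 = torch.reshape(weight, [out_c, in_c * kh * kw])
    return mode_3, mode_4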
class Adas(Optimizer):
"""
Vectorized SGD from torch.optim.SGD
"""
def __init__(self,
params,
lr: float = required,
beta: float = 0.8,
step_size: int = None,
linear: bool = True,
gamma: float = 1,
momentum: float = 0,
dampening: float = 0,
weight_decay: float = 0,
nesterov: bool = False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
"Nesterov momentum requires a momentum and zero dampening")
super(Adas, self).__init__(params[:2], defaults)
# Adas Specific stuff (not SGD)
if np.less(beta, 0) or np.greater_equal(beta, 1):
raise ValueError(f'Invalid beta: {beta}')
if np.less(gamma, 0):
raise ValueError(f'Invalid gamma: {gamma}')
if step_size is not None:
if np.less_equal(step_size, 0):
raise ValueError(f'Invalid step_size: {step_size}')
self.step_size = step_size
self.gamma = gamma
self.beta = beta
self.metrics = metrics = Metrics(params=params[2]["all_params"], linear=linear)
self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params))
self.velocity = np.zeros(
len(self.metrics.params) - len(self.metrics.mask))
self.not_ready = list(range(len(self.velocity)))
self.init_lr = lr
self.zeta = 1.
self.KG = 0.
def __setstate__(self, state):
super(Adas, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def epoch_step(self, epoch: int) -> None:
self.metrics()
if epoch == 0:
velocity = self.init_lr * np.ones(len(self.velocity))
self.KG = self.metrics.KG(epoch)
else:
KG = self.metrics.KG(epoch)
velocity = KG - self.KG
self.KG = KG
for idx in self.not_ready:
if np.isclose(KG[idx], 0.):
velocity[idx] = self.init_lr - \
self.beta * self.velocity[idx]
else:
self.not_ready.remove(idx)
if self.step_size is not None:
if epoch % self.step_size == 0 and epoch > 0:
self.lr_vector *= self.gamma
self.zeta *= self.gamma
self.velocity = np.maximum(
self.beta * self.velocity + self.zeta * velocity, 0.)
count = 0
for i in range(len(self.metrics.params)):
if i in self.metrics.mask:
self.lr_vector[i] = self.lr_vector[i - (1 if i > 0 else 0)]
else:
self.lr_vector[i] = self.velocity[count]
count += 1
def step(self, closure: callable = None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
iteration_group = 0
for group in self.param_groups:
iteration_group += 1
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p_index, p in enumerate(group['params']):
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(p.data, alpha=weight_decay)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(
d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
# p.data.add_(-group['lr'], d_p)
p.data.add_(d_p, alpha=-self.lr_vector[p_index])
return loss
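if __name__ == "__main__":
    # Hedged usage sketch. The `params` layout is an assumption inferred from
    # __init__ above: two ordinary SGD param groups followed by a dict holding
    # every parameter for the low-rank metrics.
    net = torch.nn.Sequential(
        torch.nn.Conv2d(3, 8, 3), torch.nn.Flatten(),
        torch.nn.Linear(8 * 30 * 30, 10))
    all_params = list(net.parameters())
    opt = Adas([{'params': all_params[:2]}, {'params': all_params[2:]},
                {'all_params': all_params}], lr=0.03)
    for epoch in range(2):
        opt.epoch_step(epoch)  # refresh per-layer learning rates
        loss = net(torch.randn(4, 3, 32, 32)).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()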
| [
"torch.reshape",
"torch.sqrt",
"torch.svd",
"torch.diag",
"torch.clone",
"torch.mean",
"torch.sum"
] | 1.0 | MathieuTuli/transformers | da3db8ba7a18deed492808b0d6c5d29669241fa0 |
1.7 | # coding: UTF-8
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import warnings
warnings.filterwarnings("ignore")
import argparse
import numpy as np
import shutil
import PIL
import time
from imageio import imread, imsave
from googletrans import Translator
import torch
import torchvision
import torch.nn.functional as F
from torchvision import transforms as T
import clip
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from clip_fft import to_valid_rgb, fft_image, resume_fft, pixel_image
from utils import slice_imgs, derivat, sim_func, slerp, basename, file_list, img_list, img_read, pad_up_to, txt_clean, latent_anima, cvshow, checkout, save_cfg, old_torch
import transforms
try: # progress bar for notebooks
get_ipython().__class__.__name__
from progress_bar import ProgressIPy as ProgressBar
except Exception: # normal console
from progress_bar import ProgressBar
clip_models = ['ViT-B/16', 'ViT-B/32', 'RN50', 'RN50x4', 'RN50x16', 'RN101']
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--size', default='1280-720', help='Output resolution')
parser.add_argument('-t', '--in_txt', default=None, help='Text string or file to process (main topic)')
parser.add_argument('-pre', '--in_txt_pre', default=None, help='Prefix for input text')
parser.add_argument('-post', '--in_txt_post', default=None, help='Postfix for input text')
parser.add_argument('-t2', '--in_txt2', default=None, help='Text string or file to process (style)')
parser.add_argument('-t0', '--in_txt0', default=None, help='input text to subtract')
parser.add_argument('-im', '--in_img', default=None, help='input image or directory with images')
parser.add_argument('-w0', '--weight0', default=0.3, type=float, help='weight for subtraction')
parser.add_argument('-w2', '--weight2', default=0.5, type=float, help='weight for style')
parser.add_argument('-wi', '--weight_img', default=0.5, type=float, help='weight for images')
parser.add_argument('-r', '--resume', default=None, help='Resume from saved params or from an image')
parser.add_argument( '--out_dir', default='_out')
parser.add_argument('-tr', '--translate', action='store_true', help='Translate with Google Translate')
parser.add_argument( '--invert', action='store_true', help='Invert criteria')
parser.add_argument('-v', '--verbose', default=True, type=bool)
# training
parser.add_argument( '--gen', default='RGB', help='Generation (optimization) method: FFT or RGB')
parser.add_argument('-m', '--model', default='ViT-B/32', choices=clip_models, help='Select CLIP model to use')
parser.add_argument( '--steps', default=300, type=int, help='Iterations (frames) per scene (text line)')
parser.add_argument( '--samples', default=100, type=int, help='Samples to evaluate per frame')
parser.add_argument('-lr', '--lrate', default=1, type=float, help='Learning rate')
# motion
parser.add_argument('-opt', '--opt_step', default=1, type=int, help='How many optimizing steps per save/transform step')
parser.add_argument('-sm', '--smooth', action='store_true', help='Smoothen interframe jittering for FFT method')
parser.add_argument('-it', '--interpol', default=True, help='Interpolate topics? (or change by cut)')
parser.add_argument( '--fstep', default=100, type=int, help='How many frames before changing motion')
parser.add_argument( '--scale', default=0.012, type=float)
parser.add_argument( '--shift', default=10., type=float, help='in pixels')
parser.add_argument( '--angle', default=0.8, type=float, help='in degrees')
parser.add_argument( '--shear', default=0.4, type=float)
parser.add_argument( '--anima', default=True, help='Animate motion')
# tweaks
parser.add_argument('-a', '--align', default='overscan', choices=['central', 'uniform', 'overscan', 'overmax'], help='Sampling distribution')
parser.add_argument('-tf', '--transform', default='custom', choices=['none', 'custom', 'elastic'], help='use augmenting transforms?')
parser.add_argument( '--contrast', default=1.2, type=float)
parser.add_argument( '--colors', default=2, type=float)
parser.add_argument('-sh', '--sharp', default=None, type=float)
parser.add_argument('-mc', '--macro', default=0.4, type=float, help='Endorse macro forms 0..1 ')
parser.add_argument('-e', '--enforce', default=0, type=float, help='Enforce details (by boosting similarity between two parallel samples)')
parser.add_argument('-x', '--expand', default=0, type=float, help='Boosts diversity (by enforcing difference between prev/next samples)')
parser.add_argument('-n', '--noise', default=2., type=float, help='Add noise to make composition sparse (FFT only)') # 0.04
parser.add_argument( '--sim', default='mix', help='Similarity function (angular/spherical/mixed; None = cossim)')
parser.add_argument( '--rem', default=None, help='Dummy text to add to project name')
a = parser.parse_args()
if a.size is not None: a.size = [int(s) for s in a.size.split('-')][::-1]
if len(a.size)==1: a.size = a.size * 2
a.gen = a.gen.upper()
a.invert = -1. if a.invert is True else 1.
# Overriding some parameters, depending on other settings
if a.gen == 'RGB':
a.smooth = False
a.align = 'overscan'
if a.sharp is None: a.sharp = -1. if a.gen == 'RGB' else 1.
if a.model == 'ViT-B/16': a.sim = 'cossim'
return a
def frame_transform(img, size, angle, shift, scale, shear):
if old_torch(): # 1.7.1
img = T.functional.affine(img, angle, shift, scale, shear, fillcolor=0, resample=PIL.Image.BILINEAR)
img = T.functional.center_crop(img, size)
img = pad_up_to(img, size)
else: # 1.8+
img = T.functional.affine(img, angle, shift, scale, shear, fill=0, interpolation=T.InterpolationMode.BILINEAR)
img = T.functional.center_crop(img, size) # on 1.8+ also pads
return img
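def _frame_transform_sketch():
    # Hedged usage sketch mirroring the call in process() below; the affine
    # kwargs are dispatched per torchvision version by frame_transform above.
    img = torch.rand(1, 3, 64, 64)
    return frame_transform(img, size=[64, 64], angle=0.8, shift=[2.0, 2.0],
                           scale=1.01, shear=0.4)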
def main():
a = get_args()
# Load CLIP models
model_clip, _ = clip.load(a.model, jit=old_torch())
try:
a.modsize = model_clip.visual.input_resolution
except AttributeError:
a.modsize = 288 if a.model == 'RN50x4' else 384 if a.model == 'RN50x16' else 224
if a.verbose is True: print(' using model', a.model)
xmem = {'ViT-B/16':0.25, 'RN50':0.5, 'RN50x4':0.16, 'RN50x16':0.06, 'RN101':0.33}
if a.model in xmem.keys():
a.samples = int(a.samples * xmem[a.model])
if a.translate:
translator = Translator()
if a.enforce != 0:
a.samples = int(a.samples * 0.5)
if 'elastic' in a.transform:
trform_f = transforms.transforms_elastic
a.samples = int(a.samples * 0.95)
elif 'custom' in a.transform:
trform_f = transforms.transforms_custom
a.samples = int(a.samples * 0.95)
else:
trform_f = transforms.normalize()
def enc_text(txt):
if a.translate:
txt = translator.translate(txt, dest='en').text
emb = model_clip.encode_text(clip.tokenize(txt).cuda()[:77])
return emb.detach().clone()
def enc_image(img_file):
img_t = torch.from_numpy(img_read(img_file)/255.).unsqueeze(0).permute(0,3,1,2).cuda()[:,:3,:,:]
in_sliced = slice_imgs([img_t], a.samples, a.modsize, transforms.normalize(), a.align)[0]
emb = model_clip.encode_image(in_sliced)
return emb.detach().clone()
# Encode inputs
count = 0
texts = []
styles = []
images = []
if a.in_txt is not None:
if os.path.isfile(a.in_txt):
with open(a.in_txt, 'r', encoding="utf-8") as f:
texts = f.readlines()
texts = [tt.strip() for tt in texts if len(tt.strip()) > 0 and tt[0] != '#']
else:
texts = [a.in_txt]
if a.in_txt_pre is not None:
texts = [' '.join([a.in_txt_pre, tt]).strip() for tt in texts]
if a.in_txt_post is not None:
texts = [' '.join([tt, a.in_txt_post]).strip() for tt in texts]
key_txt_encs = [enc_text(txt) for txt in texts]
count = max(count, len(key_txt_encs))
if a.in_txt2 is not None:
if os.path.isfile(a.in_txt2):
with open(a.in_txt2, 'r', encoding="utf-8") as f:
styles = f.readlines()
styles = [tt.strip() for tt in styles if len(tt.strip()) > 0 and tt[0] != '#']
else:
styles = [a.in_txt2]
key_styl_encs = [enc_text(style) for style in styles]
count = max(count, len(key_styl_encs))
if a.in_img is not None and os.path.exists(a.in_img):
images = file_list(a.in_img) if os.path.isdir(a.in_img) else [a.in_img]
key_img_encs = [enc_image(image) for image in images]
count = max(count, len(key_img_encs))
assert count > 0, "No inputs found!"
if a.in_txt0 is not None:
if a.verbose is True: print(' subtract text:', a.in_txt0)
if a.translate:
a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
# if a.verbose is True: print(' translated to:', a.in_txt0)
anti_txt_encs = [enc_text(txt) for txt in a.in_txt0.split('.')]
if a.verbose is True: print(' samples:', a.samples)
global params_tmp
shape = [1, 3, *a.size]
if a.gen == 'RGB':
params_tmp, _, sz = pixel_image(shape, a.resume)
params_tmp = params_tmp[0].cuda().detach()
else:
params_tmp, sz = resume_fft(a.resume, shape, decay=1.5, sd=1)
if sz is not None: a.size = sz
# [glob]steps = for save/move, opt_steps = for optimization cycle
steps = a.steps
glob_steps = count * steps
opt_steps = steps * a.opt_step
if glob_steps == a.fstep: a.fstep = glob_steps // 2 # otherwise no motion
workname = basename(a.in_txt) if a.in_txt is not None else basename(a.in_img)
workname = txt_clean(workname)
workdir = os.path.join(a.out_dir, workname)
if a.rem is not None: workdir += '-%s' % a.rem
if 'RN' in a.model.upper(): workdir += '-%s' % a.model
if a.noise > 0: workdir += '-n%.2g' % a.noise
if a.macro > 0: workdir += '-m%.2g' % a.macro
if a.smooth is True: workdir += '-sm'
if a.transform != 'custom': workdir += '-tf%s' % a.transform
if a.gen == 'RGB': workdir += '-rgb'
tempdir = os.path.join(workdir, 'ttt')
os.makedirs(tempdir, exist_ok=True)
save_cfg(a, workdir)
if a.in_txt is not None and os.path.isfile(a.in_txt):
shutil.copy(a.in_txt, os.path.join(workdir, os.path.basename(a.in_txt)))
if a.in_txt2 is not None and os.path.isfile(a.in_txt2):
shutil.copy(a.in_txt2, os.path.join(workdir, os.path.basename(a.in_txt2)))
midp = 0.5
if a.anima:
if a.gen == 'RGB': # zoom in
m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[-0.3], verbose=False)
m_scale = 1 + (m_scale + 0.3) * a.scale
else:
m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[0.6], verbose=False)
m_scale = 1 - (m_scale-0.6) * a.scale
m_shift = latent_anima([2], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp,midp], verbose=False)
m_angle = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp], verbose=False)
m_shear = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[midp], verbose=False)
m_shift = (midp-m_shift) * a.shift * abs(m_scale-1) / a.scale
m_angle = (midp-m_angle) * a.angle * abs(m_scale-1) / a.scale
m_shear = (midp-m_shear) * a.shear * abs(m_scale-1) / a.scale
def get_encs(encs, num):
cnt = len(encs)
if cnt == 0: return []
enc_1 = encs[min(num, cnt-1)]
enc_2 = encs[min(num+1, cnt-1)]
return slerp(enc_1, enc_2, opt_steps)
prev_enc = 0
def process(num):
global params_tmp, opt_state, params, image_f, optimizer
if a.interpol is True: # linear topics interpolation
txt_encs = get_encs(key_txt_encs, num)
styl_encs = get_encs(key_styl_encs, num)
img_encs = get_encs(key_img_encs, num)
else: # change by cut
txt_encs = [key_txt_encs[min(num, len(key_txt_encs)-1)][0]] * opt_steps if len(key_txt_encs) > 0 else []
styl_encs = [key_styl_encs[min(num, len(key_styl_encs)-1)][0]] * opt_steps if len(key_styl_encs) > 0 else []
img_encs = [key_img_encs[min(num, len(key_img_encs)-1)][0]] * opt_steps if len(key_img_encs) > 0 else []
if a.verbose is True:
if len(texts) > 0: print(' ref text: ', texts[min(num, len(texts)-1)][:80])
if len(styles) > 0: print(' ref style: ', styles[min(num, len(styles)-1)][:80])
if len(images) > 0: print(' ref image: ', basename(images[min(num, len(images)-1)])[:80])
pbar = ProgressBar(steps)
for ii in range(opt_steps):
glob_step = num * steps + ii // a.opt_step # save/transform
loss = 0
txt_enc = txt_encs[ii % len(txt_encs)].unsqueeze(0) if len(txt_encs) > 0 else None
styl_enc = styl_encs[ii % len(styl_encs)].unsqueeze(0) if len(styl_encs) > 0 else None
img_enc = img_encs[ii % len(img_encs)].unsqueeze(0) if len(img_encs) > 0 else None
# MOTION: transform frame, reload params
if ii % a.opt_step == 0:
scale = m_scale[glob_step] if a.anima else 1 + a.scale
shift = tuple(m_shift[glob_step]) if a.anima else [0, a.shift]
angle = m_angle[glob_step][0] if a.anima else a.angle
shear = m_shear[glob_step][0] if a.anima else a.shear
if a.gen == 'RGB':
img_tmp = frame_transform(params_tmp, a.size, angle, shift, scale, shear)
params, image_f, _ = pixel_image([1, 3, *a.size], resume=img_tmp)
else: # FFT
if old_torch(): # 1.7.1
img_tmp = torch.irfft(params_tmp, 2, normalized=True, signal_sizes=a.size)
img_tmp = frame_transform(img_tmp, a.size, angle, shift, scale, shear)
params_tmp = torch.rfft(img_tmp, 2, normalized=True)
else: # 1.8+
if not torch.is_complex(params_tmp):
params_tmp = torch.view_as_complex(params_tmp)
img_tmp = torch.fft.irfftn(params_tmp, s=a.size, norm='ortho')
img_tmp = frame_transform(img_tmp, a.size, angle, shift, scale, shear)
params_tmp = torch.fft.rfftn(img_tmp, s=a.size, dim=[2,3], norm='ortho')
params_tmp = torch.view_as_real(params_tmp)
params, image_f, _ = fft_image([1, 3, *a.size], sd=1, resume=params_tmp)
optimizer = torch.optim.Adam(params, a.lrate)
# optimizer = torch.optim.AdamW(params, a.lrate, weight_decay=0.01, amsgrad=True)
image_f = to_valid_rgb(image_f, colors = a.colors)
del img_tmp
if a.smooth is True and num + ii > 0:
optimizer.load_state_dict(opt_state)
noise = a.noise * (torch.rand(1, 1, a.size[0], a.size[1]//2+1, 1)-0.5).cuda() if a.noise>0 else 0.
img_out = image_f(noise)
img_sliced = slice_imgs([img_out], a.samples, a.modsize, trform_f, a.align, a.macro)[0]
out_enc = model_clip.encode_image(img_sliced)
if a.gen == 'RGB': # empirical hack
loss += 1.66 * abs(img_out.mean((2,3)) - 0.45).sum() # fix brightness
loss += 1.66 * abs(img_out.std((2,3)) - 0.17).sum() # fix contrast
if txt_enc is not None:
loss -= a.invert * sim_func(txt_enc, out_enc, a.sim)
if styl_enc is not None:
loss -= a.weight2 * sim_func(styl_enc, out_enc, a.sim)
if img_enc is not None:
loss -= a.weight_img * sim_func(img_enc, out_enc, a.sim)
if a.in_txt0 is not None: # subtract text
for anti_txt_enc in anti_txt_encs:
loss += 0.3 * sim_func(anti_txt_enc, out_enc, a.sim)
if a.sharp != 0: # scharr|sobel|naive
loss -= a.sharp * derivat(img_out, mode='naive')
if a.enforce != 0:
img_sliced = slice_imgs([image_f(noise)], a.samples, a.modsize, trform_f, a.align, a.macro)[0]
out_enc2 = model_clip.encode_image(img_sliced)
loss -= a.enforce * sim_func(out_enc, out_enc2, a.sim)
del out_enc2; torch.cuda.empty_cache()
if a.expand > 0:
global prev_enc
if ii > 0:
loss += a.expand * sim_func(prev_enc, out_enc, a.sim)
prev_enc = out_enc.detach().clone()
del img_out, img_sliced, out_enc; torch.cuda.empty_cache()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if ii % a.opt_step == a.opt_step-1:
params_tmp = params[0].detach().clone()
if a.smooth is True:
opt_state = optimizer.state_dict()
if ii % a.opt_step == 0:
with torch.no_grad():
img_t = image_f(contrast=a.contrast)[0].permute(1,2,0)
img = torch.clip(img_t*255, 0, 255).cpu().numpy().astype(np.uint8)
imsave(os.path.join(tempdir, '%06d.jpg' % glob_step), img, quality=95)
if a.verbose is True: cvshow(img)
del img, img_t
pbar.upd()
params_tmp = params[0].detach().clone()
glob_start = time.time()
try:
for i in range(count):
process(i)
except KeyboardInterrupt:
pass
os.system('ffmpeg -v warning -y -i %s/\%%06d.jpg "%s.mp4"' % (tempdir, os.path.join(workdir, workname)))
if __name__ == '__main__':
main()
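def _fft_roundtrip_sketch(size=(64, 64)):
    # Hedged sketch of the torch 1.8+ path in process(): FFT parameters are
    # stored as a real view, converted to complex, rendered to pixel space for
    # the spatial transform, then converted back.
    params = torch.view_as_real(
        torch.fft.rfftn(torch.rand(1, 3, *size), s=size, dim=[2, 3], norm='ortho'))
    spec = torch.view_as_complex(params)
    img = torch.fft.irfftn(spec, s=size, norm='ortho')  # pixel space
    spec = torch.fft.rfftn(img, s=size, dim=[2, 3], norm='ortho')
    return torch.view_as_real(spec)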
| [
"torch.rfft",
"torch.rand",
"torch.view_as_real",
"torch.no_grad",
"torch.optim.Adam",
"torch.irfft",
"torch.fft.irfftn",
"torch.view_as_complex",
"torch.clip",
"torch.cuda.empty_cache",
"torch.fft.rfftn"
] | 1.7.1 | ksburaya/aphantasia | de9d430dee7108abfcb1b19eb2d8d806b8e5d899 |
0.4 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABC
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.loss.loss_helper import FSAuxCELoss, FSAuxRMILoss
from lib.utils.tools.logger import Logger as Log
class PixelContrastLoss(nn.Module, ABC):
def __init__(self, configer):
super(PixelContrastLoss, self).__init__()
self.configer = configer
self.temperature = self.configer.get('contrast', 'temperature')
self.base_temperature = self.configer.get('contrast', 'base_temperature')
self.ignore_label = -1
if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
self.ignore_label = self.configer.get('loss', 'params')['ce_ignore_index']
self.max_samples = self.configer.get('contrast', 'max_samples')
self.max_views = self.configer.get('contrast', 'max_views')
def _hard_anchor_sampling(self, X, y_hat, y):
batch_size, feat_dim = X.shape[0], X.shape[-1]
classes = []
total_classes = 0
for ii in range(batch_size):
this_y = y_hat[ii]
this_classes = torch.unique(this_y)
this_classes = [x for x in this_classes if x > 0 and x != self.ignore_label]
this_classes = [x for x in this_classes if (this_y == x).nonzero().shape[0] > self.max_views]
classes.append(this_classes)
total_classes += len(this_classes)
if total_classes == 0:
return None, None
n_view = self.max_samples // total_classes
n_view = min(n_view, self.max_views)
X_ = torch.zeros((total_classes, n_view, feat_dim), dtype=torch.float).cuda()
y_ = torch.zeros(total_classes, dtype=torch.float).cuda()
X_ptr = 0
for ii in range(batch_size):
this_y_hat = y_hat[ii]
this_y = y[ii]
this_classes = classes[ii]
for cls_id in this_classes:
hard_indices = ((this_y_hat == cls_id) & (this_y != cls_id)).nonzero()
easy_indices = ((this_y_hat == cls_id) & (this_y == cls_id)).nonzero()
num_hard = hard_indices.shape[0]
num_easy = easy_indices.shape[0]
if num_hard >= n_view / 2 and num_easy >= n_view / 2:
num_hard_keep = n_view // 2
num_easy_keep = n_view - num_hard_keep
elif num_hard >= n_view / 2:
num_easy_keep = num_easy
num_hard_keep = n_view - num_easy_keep
elif num_easy >= n_view / 2:
num_hard_keep = num_hard
num_easy_keep = n_view - num_hard_keep
else:
Log.info('this should never be touched! {} {} {}'.format(num_hard, num_easy, n_view))
raise Exception
perm = torch.randperm(num_hard)
hard_indices = hard_indices[perm[:num_hard_keep]]
perm = torch.randperm(num_easy)
easy_indices = easy_indices[perm[:num_easy_keep]]
indices = torch.cat((hard_indices, easy_indices), dim=0)
X_[X_ptr, :, :] = X[ii, indices, :].squeeze(1)
y_[X_ptr] = cls_id
X_ptr += 1
return X_, y_
def _contrastive(self, feats_, labels_):
anchor_num, n_view = feats_.shape[0], feats_.shape[1]
labels_ = labels_.contiguous().view(-1, 1)
mask = torch.eq(labels_, torch.transpose(labels_, 0, 1)).float().cuda()
contrast_count = n_view
contrast_feature = torch.cat(torch.unbind(feats_, dim=1), dim=0)
anchor_feature = contrast_feature
anchor_count = contrast_count
anchor_dot_contrast = torch.div(torch.matmul(anchor_feature, torch.transpose(contrast_feature, 0, 1)),
self.temperature)
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach()
mask = mask.repeat(anchor_count, contrast_count)
neg_mask = 1 - mask
logits_mask = torch.ones_like(mask).scatter_(1,
torch.arange(anchor_num * anchor_count).view(-1, 1).cuda(),
0)
mask = mask * logits_mask
neg_logits = torch.exp(logits) * neg_mask
neg_logits = neg_logits.sum(1, keepdim=True)
exp_logits = torch.exp(logits)
log_prob = logits - torch.log(exp_logits + neg_logits)
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
loss = loss.mean()
return loss
def forward(self, feats, labels=None, predict=None):
labels = labels.unsqueeze(1).float().clone()
labels = torch.nn.functional.interpolate(labels,
(feats.shape[2], feats.shape[3]), mode='nearest')
labels = labels.squeeze(1).long()
assert labels.shape[-1] == feats.shape[-1], '{} {}'.format(labels.shape, feats.shape)
batch_size = feats.shape[0]
labels = labels.contiguous().view(batch_size, -1)
predict = predict.contiguous().view(batch_size, -1)
feats = feats.permute(0, 2, 3, 1)
feats = feats.contiguous().view(feats.shape[0], -1, feats.shape[-1])
feats_, labels_ = self._hard_anchor_sampling(feats, labels, predict)
loss = self._contrastive(feats_, labels_)
return loss
class ContrastAuxCELoss(nn.Module, ABC):
def __init__(self, configer=None):
super(ContrastAuxCELoss, self).__init__()
self.configer = configer
ignore_index = -1
if self.configer.exists('loss', 'params') and 'ce_ignore_index' in self.configer.get('loss', 'params'):
ignore_index = self.configer.get('loss', 'params')['ce_ignore_index']
Log.info('ignore_index: {}'.format(ignore_index))
self.loss_weight = self.configer.get('contrast', 'loss_weight')
self.use_rmi = self.configer.get('contrast', 'use_rmi')
if self.use_rmi:
self.seg_criterion = FSAuxRMILoss(configer=configer)
else:
self.seg_criterion = FSAuxCELoss(configer=configer)
self.contrast_criterion = PixelContrastLoss(configer=configer)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
assert "seg" in preds
assert "seg_aux" in preds
seg = preds['seg']
seg_aux = preds['seg_aux']
embedding = preds['embedding'] if 'embedding' in preds else None
pred = F.interpolate(input=seg, size=(h, w), mode='bilinear', align_corners=True)
pred_aux = F.interpolate(input=seg_aux, size=(h, w), mode='bilinear', align_corners=True)
loss = self.seg_criterion([pred_aux, pred], target)
if embedding is not None:
_, predict = torch.max(seg, 1)
loss_contrast = self.contrast_criterion(embedding, target, predict)
return loss + self.loss_weight * loss_contrast
return loss
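if __name__ == "__main__":
    # Hedged standalone sketch of the pair masking inside _contrastive, on CPU
    # toy inputs (no configer needed): `mask` marks same-class pairs and
    # `logits_mask` removes self-comparisons.
    feats_ = F.normalize(torch.randn(3, 4, 16), dim=-1)  # (classes, views, dim)
    labels_ = torch.tensor([0., 1., 2.]).view(-1, 1)
    mask = torch.eq(labels_, torch.transpose(labels_, 0, 1)).float()
    contrast = torch.cat(torch.unbind(feats_, dim=1), dim=0)  # (12, 16)
    logits = torch.matmul(contrast, torch.transpose(contrast, 0, 1)) / 0.1
    mask = mask.repeat(4, 4)
    logits_mask = torch.ones_like(mask).scatter_(
        1, torch.arange(12).view(-1, 1), 0)
    mask = mask * logits_mask  # positives only, self-pairs removed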
| [
"torch.zeros",
"torch.cat",
"torch.unique",
"torch.arange",
"torch.max",
"torch.unbind",
"torch.nn.functional.interpolate",
"torch.randperm",
"torch.ones_like",
"torch.transpose",
"torch.log",
"torch.exp"
] | 0.4.1 | wenguanwang/ContrastiveSeg | 9a381b9799c16d81e18d8f9f25ab509b93fb56de |
1.3 | # Copyright (c) 2021 Sen Wu. All Rights Reserved.
"""Helper function to set random seed for reproducibility of models."""
import logging
import random
from typing import Optional
import numpy as np
import torch
logger = logging.getLogger(__name__)
def set_random_seed(seed: Optional[int] = None) -> None:
"""Set random seed for random, numpy, and pytorch.
Args:
seed: The random seed, defaults to `None` which select it randomly.
"""
max_value = np.iinfo(np.uint32).max
min_value = np.iinfo(np.uint32).min
try:
seed = int(seed)
logger.info(f"Set random seed to {seed}.")
except (TypeError, ValueError):
seed = random.randint(min_value, max_value)
logger.info(f"No random seed specified, randomly set random seed to {seed}.")
if not (min_value <= seed <= max_value):
new_seed = random.randint(min_value, max_value)
logger.info(
f"Random seed {seed} is not valid, randomly set random seed to {new_seed}."
)
seed = new_seed
# Set random seed for random
random.seed(seed)
# Set random seed for all numpy operations
np.random.seed(seed=seed)
# Set random seed for PyTorch
torch.manual_seed(seed)
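if __name__ == "__main__":
    # Minimal usage sketch: the same seed reproduces the same draws.
    set_random_seed(42)
    a = torch.rand(3)
    set_random_seed(42)
    assert torch.equal(a, torch.rand(3))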
| [
"torch.manual_seed"
] | 1.3.1 | KeAWang/emmental | dae9f9fbba944f7c8404ab85aa9296545db1b82b |
0.3 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
Batch acquisition functions using the reparameterization trick in combination
with (quasi) Monte-Carlo sampling. See [Rezende2014reparam]_ and
[Wilson2017reparam]_
.. [Rezende2014reparam]
D. J. Rezende, S. Mohamed, and D. Wierstra. Stochastic backpropagation and
approximate inference in deep generative models. ICML 2014.
.. [Wilson2017reparam]
J. T. Wilson, R. Moriconi, F. Hutter, and M. P. Deisenroth.
The reparameterization trick for acquisition functions. ArXiv 2017.
"""
import math
from abc import ABC, abstractmethod
from typing import Optional, Union
import torch
from torch import Tensor
from ..exceptions.errors import UnsupportedError
from ..models.model import Model
from ..sampling.samplers import MCSampler, SobolQMCNormalSampler
from ..utils.transforms import (
concatenate_pending_points,
match_batch_shape,
t_batch_mode_transform,
)
from .acquisition import AcquisitionFunction
from .objective import IdentityMCObjective, MCAcquisitionObjective
from .utils import prune_inferior_points
class MCAcquisitionFunction(AcquisitionFunction, ABC):
r"""Abstract base class for Monte-Carlo based batch acquisition functions."""
def __init__(
self,
model: Model,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: A `m x d`-dim Tensor of `m` design points that have
been submitted for function evaluation but have not yet
been evaluated.
"""
super().__init__(model=model)
if sampler is None:
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module("sampler", sampler)
if objective is None:
objective = IdentityMCObjective()
elif not isinstance(objective, MCAcquisitionObjective):
raise UnsupportedError(
"Only objectives of type MCAcquisitionObjective are supported for "
"MC acquisition functions."
)
self.add_module("objective", objective)
self.set_X_pending(X_pending)
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
r"""Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations.
"""
pass # pragma: no cover
class qExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Expected Improvement.
This computes qEI by
(1) sampling the joint posterior over q points
(2) evaluating the improvement over the current best for each sample
(3) maximizing over q
(4) averaging over the samples
`qEI(X) = E(max(max Y - best_f, 0)), Y ~ f(X), where X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()[0]
>>> sampler = SobolQMCNormalSampler(1000)
>>> qEI = qExpectedImprovement(model, best_f, sampler)
>>> qei = qEI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: A `m x d`-dim Tensor of `m` design points that have
been submitted for function evaluation but have not yet
been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
obj = (obj - self.best_f).clamp_min(0)
q_ei = obj.max(dim=-1)[0].mean(dim=0)
return q_ei
class qNoisyExpectedImprovement(MCAcquisitionFunction):
r"""MC-based batch Noisy Expected Improvement.
This function does not assume a `best_f` is known (which would require
noiseless observations). Instead, it uses samples from the joint posterior
over the `q` test points and previously observed points. The improvement
over previously observed points is computed for each sample and averaged.
`qNEI(X) = E(max(max Y - max Y_baseline, 0))`, where
`(Y, Y_baseline) ~ f((X, X_baseline)), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qNEI = qNoisyExpectedImprovement(model, train_X, sampler)
>>> qnei = qNEI(test_X)
"""
def __init__(
self,
model: Model,
X_baseline: Tensor,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
prune_baseline: bool = False,
) -> None:
r"""q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: A `r x d`-dim Tensor of `r` design points that have
already been observed. These points are considered as
potential best design points.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: A `m x d`-dim Tensor of `m` design points that have
been submitted for function evaluation but have not yet
been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if prune_baseline:
X_baseline = prune_inferior_points(
model=model, X=X_baseline, objective=objective
)
self.register_buffer("X_baseline", X_baseline)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`.
"""
q = X.shape[-2]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=-2)
# TODO (T41248036): Implement more efficient way to compute posterior
# over both training and test points in GPyTorch
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
diffs = obj[:, :, :q].max(dim=-1)[0] - obj[:, :, q:].max(dim=-1)[0]
return diffs.clamp_min(0).mean(dim=0)
class qProbabilityOfImprovement(MCAcquisitionFunction):
r"""MC-based batch Probability of Improvement.
Estimates the probability of improvement over the current best observed
value by sampling from the joint posterior distribution of the q-batch.
MC-based estimates of a probability involves taking expectation of an
indicator function; to support auto-differntiation, the indicator is
replaced with a sigmoid function with temperature parameter `tau`.
`qPI(X) = P(max Y >= best_f), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> best_f = train_Y.max()[0]
>>> sampler = SobolQMCNormalSampler(1000)
>>> qPI = qProbabilityOfImprovement(model, best_f, sampler)
>>> qpi = qPI(test_X)
"""
def __init__(
self,
model: Model,
best_f: Union[float, Tensor],
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
tau: float = 1e-3,
) -> None:
r"""q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: A `m x d`-dim Tensor of `m` design points that have
been submitted for function evaluation but have not yet
been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
approximations of the function, but result in gradient
estimates with higher variance.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
if not torch.is_tensor(best_f):
best_f = torch.tensor(float(best_f))
self.register_buffer("best_f", best_f)
if not torch.is_tensor(tau):
tau = torch.tensor(float(tau))
self.register_buffer("tau", tau)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=-1)[0]
val = torch.sigmoid((max_obj - self.best_f) / self.tau).mean(dim=0)
return val
class qSimpleRegret(MCAcquisitionFunction):
r"""MC-based batch Simple Regret.
Samples from the joint posterior over the q-batch and computes the simple
regret.
`qSR(X) = E(max Y), Y ~ f(X), X = (x_1,...,x_q)`
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qSR = qSimpleRegret(model, sampler)
>>> qsr = qSR(test_X)
"""
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=-1)[0].mean(dim=0)
return val
class qUpperConfidenceBound(MCAcquisitionFunction):
r"""MC-based batch Upper Confidence Bound.
Uses a reparameterization to extend UCB to qUCB for q > 1 (See Appendix A
of [Wilson2017reparam].)
`qUCB = E(max(mu + |Y_tilde - mu|))`, where `Y_tilde ~ N(mu, beta pi/2 Sigma)`
and `f(X)` has distribution `N(mu, Sigma)`.
Example:
>>> model = SingleTaskGP(train_X, train_Y)
>>> sampler = SobolQMCNormalSampler(1000)
>>> qUCB = qUpperConfidenceBound(model, 0.1, sampler)
>>> qucb = qUCB(test_X)
"""
def __init__(
self,
model: Model,
beta: float,
sampler: Optional[MCSampler] = None,
objective: Optional[MCAcquisitionObjective] = None,
X_pending: Optional[Tensor] = None,
) -> None:
r"""q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: A `m x d`-dim Tensor of `m` design points that have
been submitted for function evaluation but have not yet
been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
"""
super().__init__(
model=model, sampler=sampler, objective=objective, X_pending=X_pending
)
self.beta_prime = math.sqrt(beta * math.pi / 2)
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
r"""Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`.
"""
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = mean + self.beta_prime * (obj - mean).abs()
return ucb_samples.max(dim=-1)[0].mean(dim=0)
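if __name__ == "__main__":
    # Hedged sketch of the qEI estimator in isolation: for posterior samples of
    # shape (num_mc, batch, q), improvement over best_f is clamped at 0,
    # maximized over the q points, then averaged over MC samples, mirroring
    # qExpectedImprovement.forward above.
    samples = torch.randn(512, 8, 4)  # stand-in for self.sampler(posterior)
    best_f = torch.tensor(0.5)
    qei = (samples - best_f).clamp_min(0).max(dim=-1)[0].mean(dim=0)  # shape (8,)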
| [
"torch.is_tensor",
"torch.sigmoid"
] | 0.3.5 | BradyBromley/botorch | 270599207f5b9bf8c66e1197ad2632bb69c3d3b9 |
1.4 | import io
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
def resize_axis(tensor, axis, new_size, fill_value=0, random_sampling=False):
"""Truncates or pads a tensor to new_size on on a given axis.
Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
size increases, the padding will be performed at the end, using fill_value.
Args:
tensor: The tensor to be resized.
axis: An integer representing the dimension to be sliced.
new_size: An integer or 0d tensor representing the new value for
tensor.shape[axis].
fill_value: Value to use to fill any new entries in the tensor. Will be
cast to the type of tensor.
random_sampling: If True, randomly sample (with replacement) new_size
entries along the axis when truncating, instead of taking a random
contiguous clip.
Returns:
The resized tensor.
"""
tensor = torch.Tensor(tensor)
shape = list(tensor.shape)
pad_shape = shape[:]
pad_shape[axis] = max(0, new_size - shape[axis])
start = 0 if shape[axis] <= new_size else np.random.randint(
shape[axis] - new_size) # random clip
old_length = shape[axis]
shape[axis] = min(shape[axis], new_size)
resized = torch.cat([
torch.index_select(tensor, dim=axis, index=torch.randint(old_length, (new_size,))
) if start > 0 and random_sampling else torch.narrow(tensor, dim=axis, start=start, length=shape[axis]),
torch.Tensor(*pad_shape).fill_(fill_value)
], dim=axis)
return resized
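def _resize_axis_sketch():
    # Hedged usage sketch: zero-pad past the end, or randomly clip.
    x = torch.arange(6, dtype=torch.float32).reshape(2, 3)
    padded = resize_axis(x, axis=1, new_size=5)   # shape (2, 5)
    clipped = resize_axis(x, axis=1, new_size=2)  # shape (2, 2)
    return padded, clipped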
class CircleLoss(torch.nn.Module):
def __init__(self, m=0.25, gamma=256):
super(CircleLoss, self).__init__()
self.m = m
self.gamma = gamma
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, logits, labels):
alpha = torch.clamp_min(logits + self.m, min=0).detach()  # alpha_n (negatives)
alpha[labels] = torch.clamp_min(-logits[labels] + 1 + self.m, min=0).detach()  # alpha_p (positives)
delta = torch.ones_like(logits, device=logits.device, dtype=logits.dtype) * self.m # delta_n
delta[labels] = 1 - self.m # delta_p
return self.loss(alpha * (logits - delta) * self.gamma, labels)
| [
"torch.narrow",
"torch.randint",
"torch.ones_like",
"torch.Tensor",
"torch.nn.CrossEntropyLoss",
"torch.clamp_min"
] | 1.4.0 | glee1228/segment_temporal_context_aggregation | e5778f848f1cfd89bd1f77beb5e1b38a66a2f13d |
1.7 | import copy
import torch
import logging
import numpy as np
from sacred import Experiment
from noge.data_loaders import get_datasets, get_test_loader, get_train_generator
from noge.factory import make_env, make_memory
from noge.network import make_network
from noge.agent import Actor, main_loop, loop_ing
from noge.trainers import DQNTrainer, Replay
from noge.policies import LinearSchedule, GraphDQNPolicy
from noge.preprocessors import Preprocessor
from noge.evaluation import Evaluator, eval_ing
from noge.constants import CONFIGS_DIR, EVAL_DIR
from xlog.utils import get_logger
from xlog.mlflow_observer import MlflowObserver
ex = Experiment(name='NOGE_DQN', ingredients=[eval_ing, loop_ing])
ex.add_config(str(CONFIGS_DIR / 'dqn.yaml'))
ex.logger = get_logger(__name__, level=logging.INFO)
ex.observers = [MlflowObserver(tracking_uri=str(EVAL_DIR.absolute()))]
@ex.automain
def train(dataset, test_size, max_episode_steps, reward_type, input_meas_type, meas_transform,
target_transform, node_history, gamma, target_update_freq,
cat_features, feature_range, replay_capacity, min_horizon, epsilon_start, epsilon_end,
exploration_frac, n_train_steps, train_freq, loss, batch_size, lr, n_test_episodes, init_eval,
n_eval_artifacts, test_freq, log_freq, device, seed, data_seed, save_model, _log, _run, _config):
np.set_printoptions(precision=2, suppress=True)
if device.startswith('cuda'):
assert torch.cuda.is_available()
logger = _log
device = torch.device(device)
# data source
train_set, test_set = get_datasets(dataset, seed=data_seed, test_size=test_size)
max_nodes = max(train_set.max_nodes, test_set.max_nodes)
max_edges = 2 * max(train_set.max_edges, test_set.max_edges) # for undirected graphs, consider both directions
test_loader = get_test_loader(test_set, seed=seed, num_samples=n_test_episodes)
train_gen = get_train_generator(train_set, seed=seed)
preprocessor = Preprocessor(input_meas_type=input_meas_type,
output_meas_type=input_meas_type,
feature_range=feature_range,
meas_transform=meas_transform,
target_transform=target_transform,
temporal_offsets=[1.],
max_nodes=max_nodes,
device=device)
# environment
train_env_config = dict(
max_episode_steps=max_episode_steps,
reward_type=reward_type,
max_nodes=max_nodes,
max_edges=max_edges,
nn_feat='N' in cat_features,
)
train_env = make_env(**train_env_config, data_generator=train_gen, seed=seed)
test_env_config = copy.deepcopy(train_env_config)
test_env_config.update(sample_goals=False, data_generator=None)
test_env = make_env(**test_env_config, seed=seed)
# graph memory + graph preprocessing
neg_label, pos_label = feature_range
mem_features = dict(cat=cat_features)
graph_mem_config = dict(
max_episode_steps=max_episode_steps,
max_nodes=max_nodes,
max_edges=max_edges,
history=node_history,
memory_type='cat',
features=mem_features,
neg_label=neg_label,
pos_label=pos_label
)
eval_memory = make_memory(online=True, **graph_mem_config)
acting_memory = make_memory(online=True, **graph_mem_config)
# model
model_config = dict(
dim_node=eval_memory.dim_node,
dim_meas=preprocessor.dim_input_meas,
dim_goal=1,
max_edges=max_edges,
**_config['model']
)
network = make_network(**model_config).to(device)
# evaluation
eval_policy = GraphDQNPolicy(network, eval_memory, preprocessor=preprocessor, device=device)
evaluator = Evaluator(test_loader, test_env, eval_policy)
# experience collecting policy
exploration_steps = int(exploration_frac * n_train_steps)
exploration_schedule = LinearSchedule(epsilon_start, epsilon_end, exploration_steps)
acting_policy = GraphDQNPolicy(network,
graph_memory=acting_memory,
preprocessor=preprocessor,
exploration_schedule=exploration_schedule,
device=device)
# replay buffer
replay_buffer = Replay(capacity=replay_capacity,
ob_space=train_env.observation_space,
graph_mem_config=graph_mem_config,
min_horizon=min_horizon)
# actor: runs the simulation forward and stores to the replay buffer
actor = Actor(train_env, acting_policy, replay_buffer)
# trainer
optimizer = torch.optim.Adam(network.parameters(), lr=lr)
if loss == 'mse':
criterion = torch.nn.MSELoss()
else:
raise ValueError(f"Unsupported loss: {loss}")
trainer = DQNTrainer(gamma=gamma,
target_update_freq=target_update_freq,
replay_buffer=replay_buffer,
batch_size=batch_size,
network=network,
preprocessor=preprocessor,
criterion=criterion,
optimizer=optimizer,
device=device)
# fill up the replay buffer
network.eval()
logger.info(f"Filling up the replay buffer...")
actor.step(n=replay_capacity, use_tqdm=True)
logger.info(f"Replay buffer filled: [{len(replay_buffer)} / {replay_capacity}]")
# fit the preprocessor with buffer data
preprocessor.fit(replay_buffer._measurements)
best_perf = main_loop(actor, trainer, evaluator, network, exploration_schedule,
init_eval, n_eval_artifacts, n_train_steps, train_freq, log_freq, test_freq, save_model)
train_env.close()
evaluator.close()
return best_perf
| [
"torch.cuda.is_available",
"torch.device",
"torch.nn.MSELoss"
] | 1.7.1 | johny-c/noge | 88e68ba8c51ff0d63577991e233e9110cb76e228 |
1.8 | import os
import torch
from typing import List
from dqc.utils.datastruct import CGTOBasis
__all__ = ["loadbasis"]
_dtype = torch.double
_device = torch.device("cpu")
def loadbasis(cmd: str, dtype: torch.dtype = _dtype,
device: torch.device = _device, requires_grad: bool = False) -> \
List[CGTOBasis]:
"""
Load basis from a file and return the list of CGTOBasis.
Arguments
---------
cmd: str
This can be a file path where the basis is stored or a
string in format ``"atomz:basis"``, e.g. ``"1:6-311++G**"``.
dtype: torch.dtype
Tensor data type for ``alphas`` and ``coeffs`` of the GTO basis
device: torch.device
Tensor device for ``alphas`` and ``coeffs``
requires_grad: bool
If ``True``, the ``alphas`` and ``coeffs`` tensors become differentiable
Returns
-------
list of CGTOBasis
List of GTO basis loaded from the given file
"""
res = []
if not os.path.exists(cmd):
file = _get_basis_file(cmd)
else:
file = cmd
# read the content
with open(file, "r") as f:
lines = f.read().split("\n")
# skip the header
while True:
line = lines.pop(0)
if line == "":
continue
if line.startswith("!"):
continue
break
# now it is at the orbital description
while len(lines) > 0:
line = lines.pop(0)
if line.startswith("**"):
break
desc = line.split()
nlines = int(desc[1])
if nlines == 0:
raise RuntimeError("Zero line on basis %s" % file)
# read the exponents and the coefficients
alphas = []
coeffsT = []
for i in range(nlines):
alphacoeff = [_read_float(f) for f in lines.pop(0).split()]
alphas.append(alphacoeff[0])
coeffsT.append(alphacoeff[1:])
# coeffsT: list with shape (nbasis, ncontr)
# coeffs: list with shape (ncontr, nbasis)
coeffs = list(zip(*coeffsT))
ncoeffs = len(coeffs)
angmoms = _expand_angmoms(desc[0], ncoeffs)
# convert to tensor
alpha = torch.tensor(alphas, dtype=dtype, device=device, requires_grad=requires_grad)
for i in range(ncoeffs):
coeff = torch.tensor(coeffs[i], dtype=dtype, device=device, requires_grad=requires_grad)
basis = CGTOBasis(angmom=angmoms[i], alphas=alpha, coeffs=coeff)
basis.wfnormalize_()
res.append(basis)
return res
def _read_float(s: str) -> float:
s = s.replace("D", "E")
return float(s)
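# Illustrative note (added; not part of the original module): Gaussian94 basis
# files use Fortran-style "D" exponents, which Python's float() cannot parse,
# hence the replacement above. For example:
#   _read_float("0.1172280D+02")  # -> 11.7228
#   _read_float("0.1E-01")        # -> 0.01 (plain "E" notation still works)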
def _get_basis_file(cmd: str) -> str:
# parse the string command, check if the basis has already been downloaded
# (download if not), and return the file name
# parse to get the atomz and the basisname
atomz_str, raw_basisname = cmd.split(":")
raw_basisname = raw_basisname.strip()
atomz = int(atomz_str)
# get the path to the database
basisname = _normalize_basisname(raw_basisname)
thisdir = os.path.dirname(os.path.realpath(__file__))
fname = "%02d.gaussian94" % atomz
fdir = os.path.join(thisdir, ".database", basisname)
fpath = os.path.join(fdir, fname)
# if the file does not exist, download it
if not os.path.exists(fpath):
print("The %s basis for atomz %d does not exist, but we will download it" %
(raw_basisname, atomz))
if not os.path.exists(fdir):
os.makedirs(fdir)
_download_basis(fpath, atomz, raw_basisname)
return fpath
def _normalize_basisname(basisname: str) -> str:
b = basisname.lower()
b = b.replace("+", "p")
b = b.replace("*", "s")
b = b.replace("(", "_")
b = b.replace(")", "_")
b = b.replace(",", "_")
return b
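# Illustrative note (added; not part of the original module): the normalization
# above maps characters that are unsafe in directory names, so a command like
# "1:6-311++G**" resolves to the folder "6-311ppgss". For example:
#   _normalize_basisname("6-311++G**")  # -> "6-311ppgss"
#   _normalize_basisname("6-31G(d,p)")  # -> "6-31g_d_p_"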
def _download_basis(fname: str, atomz: int, basisname: str) -> None:
import basis_set_exchange as bse
s = bse.get_basis(basisname, elements=[atomz], fmt="gaussian94")
with open(fname, "w") as f:
f.write(s)
print("Downloaded to %s" % fname)
def _expand_angmoms(s: str, n: int) -> List[int]:
    # convert the angular momentum characters into integers and return a list
    # of n integers containing the angular momenta
if len(s) == n:
pass
elif n % len(s) == 0:
s = s * (n // len(s))
else:
raise RuntimeError("Do not know how to read orbital %s with %d coefficient columns" %
(s, n))
s = s.lower()
spdfmap = {
"s": 0,
"p": 1,
"d": 2,
"f": 3,
"g": 4,
"h": 5,
"i": 6,
}
angmoms = [spdfmap[c] for c in s]
return angmoms
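# Illustrative note (added; not part of the original module): a shell label is
# either one letter per coefficient column or tiled to match. For example:
#   _expand_angmoms("SP", 2)  # -> [0, 1]
#   _expand_angmoms("D", 3)   # -> [2, 2, 2] ("D" is tiled to "DDD")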
| [
"torch.device",
"torch.tensor"
] | 1.8 | Jaikinator/dqc | 47c964c7d1323a35f4f69521d40476c41843810e |
1.8 | from typing import overload, Tuple
import torch
__all__ = ["BaseOrbParams", "QROrbParams", "MatExpOrbParams"]
class BaseOrbParams(object):
"""
Class that provides free-parameterization of orthogonal orbitals.
"""
@overload
@staticmethod
def params2orb(params: torch.Tensor, coeffs: torch.Tensor, with_penalty: None) -> torch.Tensor:
...
@overload
@staticmethod
def params2orb(params: torch.Tensor, coeffs: torch.Tensor, with_penalty: float) \
-> Tuple[torch.Tensor, torch.Tensor]:
...
@staticmethod
def params2orb(params, coeffs, with_penalty):
"""
Convert the parameters & coefficients to the orthogonal orbitals.
``params`` is the tensor to be optimized in variational method, while
``coeffs`` is a tensor that is needed to get the orbital, but it is not
optimized in the variational method.
"""
pass
@staticmethod
def orb2params(orb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Get the free parameters from the orthogonal orbitals. Returns ``params``
and ``coeffs`` described in ``params2orb``.
"""
pass
class QROrbParams(BaseOrbParams):
@overload
@staticmethod
def params2orb(params: torch.Tensor, coeffs: torch.Tensor, with_penalty: None) -> torch.Tensor:
...
@overload
@staticmethod
def params2orb(params: torch.Tensor, coeffs: torch.Tensor, with_penalty: float) \
-> Tuple[torch.Tensor, torch.Tensor]:
...
@staticmethod
def params2orb(params, coeffs, with_penalty):
orb, _ = torch.linalg.qr(params)
if with_penalty is None:
return orb
else:
            # The QR decomposition's solution is not unique: every column
            # can be multiplied by -1 and it is still a valid solution.
            # So, to remove the non-uniqueness, we make the sign of the
            # column sums positive.
s1 = torch.sign(orb.sum(dim=-2, keepdim=True)) # (*BD, 1, norb)
s2 = torch.sign(params.sum(dim=-2, keepdim=True))
penalty = torch.mean((orb * s1 - params * s2) ** 2) * with_penalty
return orb, penalty
@staticmethod
def orb2params(orb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
coeffs = torch.tensor([0], dtype=orb.dtype, device=orb.device)
return orb, coeffs
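# Usage sketch (added for illustration; a minimal check, not part of the
# original API): params2orb orthonormalizes an arbitrary parameter matrix via
# QR, so the returned orbitals have orthonormal columns:
#   params = torch.randn(5, 3, dtype=torch.double)
#   orb = QROrbParams.params2orb(params, coeffs=None, with_penalty=None)
#   assert torch.allclose(orb.T @ orb, torch.eye(3, dtype=torch.double))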
class MatExpOrbParams(BaseOrbParams):
"""
Orthogonal orbital parameterization using matrix exponential.
The orthogonal orbital is represented by:
P = matrix_exp(Q) @ C
where C is an orthogonal coefficient tensor, and Q is the parameters defining
the rotation of the orthogonal tensor.
"""
@overload
@staticmethod
def params2orb(params: torch.Tensor, coeffs: torch.Tensor, with_penalty: None) -> torch.Tensor:
...
@overload
@staticmethod
def params2orb(params: torch.Tensor, coeffs: torch.Tensor, with_penalty: float) \
-> Tuple[torch.Tensor, torch.Tensor]:
...
@staticmethod
def params2orb(params, coeffs, with_penalty):
# params: (*, nparams)
# coeffs: (*, nao, norb)
nao = coeffs.shape[-2]
norb = coeffs.shape[-1]
nparams = params.shape[-1]
bshape = params.shape[:-1]
# construct the rotation parameters
triu_idxs = torch.triu_indices(nao, nao, offset=1)[..., :nparams]
rotmat = torch.zeros((*bshape, nao, nao), dtype=params.dtype, device=params.device)
rotmat[..., triu_idxs[0], triu_idxs[1]] = params
rotmat = rotmat - rotmat.transpose(-2, -1).conj()
# calculate the orthogonal orbital
ortho_orb = torch.matrix_exp(rotmat) @ coeffs
if with_penalty:
penalty = torch.zeros((1,), dtype=params.dtype, device=params.device)
return ortho_orb, penalty
else:
return ortho_orb
@staticmethod
def orb2params(orb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# orb: (*, nao, norb)
nao = orb.shape[-2]
norb = orb.shape[-1]
nparams = norb * (nao - norb) + norb * (norb - 1) // 2
# the orbital becomes the coefficients while params is all zeros (no rotation)
coeffs = orb
params = torch.zeros((*orb.shape[:-2], nparams), dtype=orb.dtype, device=orb.device)
return params, coeffs
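# Usage sketch (added for illustration; a minimal round trip, not part of the
# original API): orb2params returns all-zero parameters, and matrix_exp(0) is
# the identity, so params2orb reproduces the original orbitals:
#   orb, _ = torch.linalg.qr(torch.randn(6, 2, dtype=torch.double))
#   params, coeffs = MatExpOrbParams.orb2params(orb)
#   assert params.shape == (9,)  # norb*(nao-norb) + norb*(norb-1)//2 = 2*4 + 1
#   assert torch.allclose(MatExpOrbParams.params2orb(params, coeffs, None), orb)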
| [
"torch.zeros",
"torch.triu_indices",
"torch.linalg.qr",
"torch.matrix_exp",
"torch.tensor",
"torch.mean"
] | 1.8 | Jaikinator/dqc | 47c964c7d1323a35f4f69521d40476c41843810e |
1.9 | import torch
from recstudio.ann import sampler
from recstudio.data import dataset
from recstudio.model import basemodel, loss_func, scorer
r"""
HGN
########
Paper Reference:
Chen ma, et al. "HGN: Hierarchical Gating Networks for Sequential Recommendation" in KDD2019.
https://dl.acm.org/doi/abs/10.1145/3292500.3330984
"""
class HGNQueryEncoder(torch.nn.Module):
def __init__(self, fuid, fiid, num_users, embed_dim, max_seq_len, item_encoder, pooling_type='mean') -> None:
super().__init__()
self.fuid = fuid
self.fiid = fiid
self.item_encoder = item_encoder
self.pooling_type = pooling_type
self.user_embedding = torch.nn.Embedding(num_users, embed_dim, 0)
self.W_g_1 = torch.nn.Linear(embed_dim, embed_dim, bias=False)
self.W_g_2 = torch.nn.Linear(embed_dim, embed_dim, bias=False)
self.b_g = torch.nn.Parameter(torch.empty(embed_dim), requires_grad=True)
self.w_g_3 = torch.nn.Linear(embed_dim, 1, bias=False)
self.W_g_4 = torch.nn.Linear(embed_dim, max_seq_len)
def forward(self, batch):
U = self.user_embedding(batch[self.fuid])
S = self.item_encoder(batch['in_'+self.fiid])
S_F = S * torch.sigmoid(self.W_g_1(S) + self.W_g_2(U).view(U.size(0), 1, -1) + self.b_g)
weight = torch.sigmoid(self.w_g_3(S_F) + ([email protected]_g_4.weight[:S.size(1)].T).view(U.size(0), -1, 1)) # BxLx1
S_I = S_F * weight
if self.pooling_type == 'mean':
s = S_I.sum(1) / weight.sum(1)
elif self.pooling_type == 'max':
s = torch.max(S_I, dim=1).values
else:
            raise ValueError("`pooling_type` only supports `mean` and `max`")
query = U + s + S.sum(1)
return query
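    # Shape walk-through (added for illustration, assuming batch size B,
    # sequence length L and embedding dim D): U is (B, D) and S is (B, L, D);
    # the feature gate keeps S_F at (B, L, D), `weight` is (B, L, 1), and the
    # returned query is (B, D) = user + pooled short-term + summed item embeddings.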
class HGN(basemodel.BaseRetriever):
r"""HGN proposes a hierarchical gating network, integrated with the Bayesian Personalized Ranking
(BPR) to capture both the long-term and short-term user interests. HGN consists of a feature
gating module, an instance gating module, and an item-item product module."""
def _get_dataset_class(self):
r"""The dataset is SeqDataset."""
return dataset.SeqDataset
def _get_query_encoder(self, train_data):
return HGNQueryEncoder(self.fuid, self.fiid, train_data.num_users, self.embed_dim, \
train_data.config['max_seq_len'], self.item_encoder, self.config['pooling_type'])
def _get_scorer_func(self):
return scorer.InnerProductScorer()
def _get_loss_func(self):
r"""BPR loss is used."""
return loss_func.BPRLoss()
def _get_sampler(self, train_data):
return sampler.UniformSampler(train_data.num_items-1)
| [
"torch.nn.Linear",
"torch.empty",
"torch.nn.Embedding",
"torch.max"
] | 1.9.0 | ustc-recsys/Torchrec | 4d62ee42018c12961850936cfd8f4f8d3c6a8dbc |
1.8 | import pytest
import numpy as np
import torch
from doctr.models.preprocessor import PreProcessor
@pytest.mark.parametrize(
"batch_size, output_size, input_tensor, expected_batches, expected_value",
[
[2, (128, 128), np.full((3, 256, 128, 3), 255, dtype=np.uint8), 1, .5], # numpy uint8
[2, (128, 128), np.ones((3, 256, 128, 3), dtype=np.float32), 1, .5], # numpy fp32
[2, (128, 128), np.ones((3, 256, 128, 3), dtype=np.float16), 1, .5], # numpy fp16
[2, (128, 128), torch.full((3, 3, 256, 128), 255, dtype=torch.uint8), 1, .5], # torch uint8
[2, (128, 128), torch.ones((3, 3, 256, 128), dtype=torch.float32), 1, .5], # torch fp32
[2, (128, 128), torch.ones((3, 3, 256, 128), dtype=torch.float16), 1, .5], # torch fp16
[2, (128, 128), [np.full((256, 128, 3), 255, dtype=np.uint8)] * 3, 2, .5], # list of numpy uint8
[2, (128, 128), [np.ones((256, 128, 3), dtype=np.float32)] * 3, 2, .5], # list of numpy fp32
[2, (128, 128), [np.ones((256, 128, 3), dtype=np.float16)] * 3, 2, .5], # list of numpy fp16
[2, (128, 128), [torch.full((3, 256, 128), 255, dtype=torch.uint8)] * 3, 2, .5], # list of torch uint8
[2, (128, 128), [torch.ones((3, 256, 128), dtype=torch.float32)] * 3, 2, .5], # list of torch fp32
        [2, (128, 128), [torch.ones((3, 256, 128), dtype=torch.float16)] * 3, 2, .5],  # list of torch fp16
],
)
def test_preprocessor(batch_size, output_size, input_tensor, expected_batches, expected_value):
processor = PreProcessor(output_size, batch_size)
# Invalid input type
with pytest.raises(TypeError):
processor(42)
# 4D check
with pytest.raises(AssertionError):
processor(np.full((256, 128, 3), 255, dtype=np.uint8))
with pytest.raises(TypeError):
processor(np.full((1, 256, 128, 3), 255, dtype=np.int32))
# 3D check
with pytest.raises(AssertionError):
processor([np.full((3, 256, 128, 3), 255, dtype=np.uint8)])
with pytest.raises(TypeError):
processor([np.full((256, 128, 3), 255, dtype=np.int32)])
with torch.no_grad():
out = processor(input_tensor)
assert isinstance(out, list) and len(out) == expected_batches
assert all(isinstance(b, torch.Tensor) for b in out)
assert all(b.dtype == torch.float32 for b in out)
assert all(b.shape[-2:] == output_size for b in out)
assert all(torch.all(b == expected_value) for b in out)
assert len(repr(processor).split('\n')) == 4
# Check FP16
processor = PreProcessor(output_size, batch_size, fp16=True)
with torch.no_grad():
out = processor(input_tensor)
assert all(b.dtype == torch.float16 for b in out)
| [
"torch.no_grad",
"torch.ones",
"torch.all",
"torch.full"
] | 1.8.0 | mzeidhassan/doctr | 14b376e07d31b09b6bd31bceebf6ffb477c30f08 |
1.8 | import os
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import torchvision
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import STL10, ImageFolder
def build_custom_pipeline():
"""Builds augmentation pipelines for custom data.
    If you want to do esoteric augmentations, you can just re-write this function.
Needs to return a dict with the same structure.
"""
pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
return pipeline
def prepare_transforms(dataset: str) -> Tuple[nn.Module, nn.Module]:
"""Prepares pre-defined train and test transformation pipelines for some datasets.
Args:
dataset (str): dataset name.
Returns:
Tuple[nn.Module, nn.Module]: training and validation transformation pipelines.
"""
cifar_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=32, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
]
),
}
stl_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=96, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize((96, 96)),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4823, 0.4466), (0.247, 0.243, 0.261)),
]
),
}
imagenet_pipeline = {
"T_train": transforms.Compose(
[
transforms.RandomResizedCrop(size=224, scale=(0.08, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
"T_val": transforms.Compose(
[
transforms.Resize(256), # resize shorter
transforms.CenterCrop(224), # take center crop
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.228, 0.224, 0.225)),
]
),
}
custom_pipeline = build_custom_pipeline()
pipelines = {
"cifar10": cifar_pipeline,
"cifar100": cifar_pipeline,
"stl10": stl_pipeline,
"imagenet100": imagenet_pipeline,
"imagenet": imagenet_pipeline,
"custom": custom_pipeline,
}
assert dataset in pipelines
pipeline = pipelines[dataset]
T_train = pipeline["T_train"]
T_val = pipeline["T_val"]
return T_train, T_val
def prepare_datasets(
dataset: str,
T_train: Callable,
T_val: Callable,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
) -> Tuple[Dataset, Dataset]:
"""Prepares train and val datasets.
Args:
dataset (str): dataset name.
T_train (Callable): pipeline of transformations for training dataset.
T_val (Callable): pipeline of transformations for validation dataset.
data_dir Optional[Union[str, Path]]: path where to download/locate the dataset.
train_dir Optional[Union[str, Path]]: subpath where the training data is located.
val_dir Optional[Union[str, Path]]: subpath where the validation data is located.
Returns:
Tuple[Dataset, Dataset]: training dataset and validation dataset.
"""
if data_dir is None:
sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
data_dir = sandbox_dir / "datasets"
else:
data_dir = Path(data_dir)
if train_dir is None:
train_dir = Path(f"{dataset}/train")
else:
train_dir = Path(train_dir)
if val_dir is None:
val_dir = Path(f"{dataset}/val")
else:
val_dir = Path(val_dir)
assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"]
if dataset in ["cifar10", "cifar100"]:
DatasetClass = vars(torchvision.datasets)[dataset.upper()]
train_dataset = DatasetClass(
data_dir / train_dir,
train=True,
download=True,
transform=T_train,
)
val_dataset = DatasetClass(
data_dir / val_dir,
train=False,
download=True,
transform=T_val,
)
elif dataset == "stl10":
train_dataset = STL10(
data_dir / train_dir,
split="train",
download=True,
transform=T_train,
)
val_dataset = STL10(
data_dir / val_dir,
split="test",
download=True,
transform=T_val,
)
elif dataset in ["imagenet", "imagenet100", "custom"]:
train_dir = data_dir / train_dir
val_dir = data_dir / val_dir
train_dataset = ImageFolder(train_dir, T_train)
val_dataset = ImageFolder(val_dir, T_val)
return train_dataset, val_dataset
def prepare_dataloaders(
train_dataset: Dataset, val_dataset: Dataset, batch_size: int = 64, num_workers: int = 4
) -> Tuple[DataLoader, DataLoader]:
"""Wraps a train and a validation dataset with a DataLoader.
Args:
train_dataset (Dataset): object containing training data.
val_dataset (Dataset): object containing validation data.
batch_size (int): batch size.
num_workers (int): number of parallel workers.
Returns:
Tuple[DataLoader, DataLoader]: training dataloader and validation dataloader.
"""
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=False,
)
return train_loader, val_loader
def prepare_data(
dataset: str,
transform: Optional[Callable] = None,
data_dir: Optional[Union[str, Path]] = None,
train_dir: Optional[Union[str, Path]] = None,
val_dir: Optional[Union[str, Path]] = None,
batch_size: int = 64,
num_workers: int = 4,
) -> Tuple[DataLoader, DataLoader]:
"""Prepares transformations, creates dataset objects and wraps them in dataloaders.
Args:
dataset (str): dataset name.
data_dir (Optional[Union[str, Path]], optional): path where to download/locate the dataset.
Defaults to None.
train_dir (Optional[Union[str, Path]], optional): subpath where the
training data is located. Defaults to None.
val_dir (Optional[Union[str, Path]], optional): subpath where the
validation data is located. Defaults to None.
batch_size (int, optional): batch size. Defaults to 64.
num_workers (int, optional): number of parallel workers. Defaults to 4.
Returns:
Tuple[DataLoader, DataLoader]: prepared training and validation dataloader;.
"""
if transform is None:
T_train, T_val = prepare_transforms(dataset)
else:
T_train = transform
T_val = transform
train_dataset, val_dataset = prepare_datasets(
dataset,
T_train,
T_val,
data_dir=data_dir,
train_dir=train_dir,
val_dir=val_dir,
)
train_loader, val_loader = prepare_dataloaders(
train_dataset,
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
)
return train_loader, val_loader
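# Usage sketch (added for illustration; the paths and sizes are placeholder
# assumptions, not part of the original repository):
#   train_loader, val_loader = prepare_data(
#       "cifar10", data_dir="./datasets", batch_size=256, num_workers=8
#   )
#   images, targets = next(iter(train_loader))  # images: (256, 3, 32, 32)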
| [
"torch.utils.data.DataLoader"
] | 1.8.1 | fariasfc/solo-learn | f53ff40edbc7e96e06db5238d8c3a44f7b8965c1 |
1.8 | import torch
import torch.nn.functional as F
def invariance_loss(z1: torch.Tensor, z2: torch.Tensor) -> torch.Tensor:
"""Computes mse loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
Returns:
torch.Tensor: invariance loss (mean squared error).
"""
return F.mse_loss(z1, z2)
def variance_loss(z1: torch.Tensor, z2: torch.Tensor) -> torch.Tensor:
"""Computes variance loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
Returns:
torch.Tensor: variance regularization loss.
"""
eps = 1e-4
std_z1 = torch.sqrt(z1.var(dim=0) + eps)
std_z2 = torch.sqrt(z2.var(dim=0) + eps)
std_loss = torch.mean(F.relu(1 - std_z1)) + torch.mean(F.relu(1 - std_z2))
return std_loss
def covariance_loss(z1: torch.Tensor, z2: torch.Tensor) -> torch.Tensor:
"""Computes covariance loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
Returns:
torch.Tensor: covariance regularization loss.
"""
N, D = z1.size()
z1 = z1 - z1.mean(dim=0)
z2 = z2 - z2.mean(dim=0)
cov_z1 = (z1.T @ z1) / (N - 1)
cov_z2 = (z2.T @ z2) / (N - 1)
diag = torch.eye(D, device=z1.device)
cov_loss = cov_z1[~diag.bool()].pow_(2).sum() / D + cov_z2[~diag.bool()].pow_(2).sum() / D
return cov_loss
def vicreg_loss_func(
z1: torch.Tensor,
z2: torch.Tensor,
sim_loss_weight: float = 25.0,
var_loss_weight: float = 25.0,
cov_loss_weight: float = 1.0,
) -> torch.Tensor:
"""Computes VICReg's loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
sim_loss_weight (float): invariance loss weight.
var_loss_weight (float): variance loss weight.
cov_loss_weight (float): covariance loss weight.
Returns:
torch.Tensor: VICReg loss.
"""
sim_loss = invariance_loss(z1, z2)
var_loss = variance_loss(z1, z2)
cov_loss = covariance_loss(z1, z2)
loss = sim_loss_weight * sim_loss + var_loss_weight * var_loss + cov_loss_weight * cov_loss
return loss
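# Usage sketch (added for illustration): with the default weights the total
# loss is 25 * invariance + 25 * variance + 1 * covariance. For example:
#   z1, z2 = torch.randn(128, 256), torch.randn(128, 256)
#   loss = vicreg_loss_func(z1, z2)  # scalar tensor, differentiable in z1/z2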
| [
"torch.nn.functional.mse_loss",
"torch.nn.functional.relu",
"torch.eye"
] | 1.8.1 | fariasfc/solo-learn | b75ba6faf5269b0849120bfb89593f9bc23e09bc |
1.7 | import json
import os
import sys
import time
import torch
from training.training import Trainer
from data.conversion import GridDataConverter, PointCloudDataConverter, ERA5Converter
from data.dataloaders import mnist, celebahq
from data.dataloaders_era5 import era5
from data.dataloaders3d import shapenet_voxels, shapenet_point_clouds
from models.discriminator import PointConvDiscriminator
from models.function_distribution import HyperNetwork, FunctionDistribution
from models.function_representation import FunctionRepresentation, FourierFeatures
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get config file from command line arguments
if len(sys.argv) != 2:
raise(RuntimeError("Wrong arguments, use python main.py <config_path>"))
config_path = sys.argv[1]
# Open config file
with open(config_path) as f:
config = json.load(f)
if config["path_to_data"] == "":
raise(RuntimeError("Path to data not specified. Modify path_to_data attribute in config to point to data."))
# Create a folder to store experiment results
timestamp = time.strftime("%Y-%m-%d_%H-%M")
directory = "{}_{}".format(timestamp, config["id"])
if not os.path.exists(directory):
os.makedirs(directory)
# Save config file in experiment directory
with open(directory + '/config.json', 'w') as f:
json.dump(config, f)
# Setup dataloader
is_voxel = False
is_point_cloud = False
is_era5 = False
if config["dataset"] == 'mnist':
dataloader = mnist(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"],
train=True)
input_dim = 2
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"])
elif config["dataset"] == 'celebahq':
dataloader = celebahq(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"])
input_dim = 2
output_dim = 3
data_shape = (3, config["resolution"], config["resolution"])
elif config["dataset"] == 'shapenet_voxels':
dataloader = shapenet_voxels(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"])
input_dim = 3
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"], config["resolution"])
is_voxel = True
elif config["dataset"] == 'shapenet_point_clouds':
dataloader = shapenet_point_clouds(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"])
input_dim = 3
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"], config["resolution"])
is_point_cloud = True
elif config["dataset"] == 'era5':
dataloader = era5(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"])
input_dim = 3
output_dim = 1
data_shape = (46, 90)
is_era5 = True
# Setup data converter
if is_point_cloud:
data_converter = PointCloudDataConverter(device, data_shape, normalize_features=True)
elif is_era5:
data_converter = ERA5Converter(device, data_shape, normalize_features=True)
else:
data_converter = GridDataConverter(device, data_shape, normalize_features=True)
# Setup encoding for function distribution
num_frequencies = config["generator"]["encoding"]["num_frequencies"]
std_dev = config["generator"]["encoding"]["std_dev"]
if num_frequencies:
frequency_matrix = torch.normal(mean=torch.zeros(num_frequencies, input_dim),
std=std_dev).to(device)
encoding = FourierFeatures(frequency_matrix)
else:
encoding = torch.nn.Identity()
# Setup generator models
final_non_linearity = torch.nn.Tanh()
non_linearity = torch.nn.LeakyReLU(0.1)
function_representation = FunctionRepresentation(input_dim, output_dim,
config["generator"]["layer_sizes"],
encoding, non_linearity,
final_non_linearity).to(device)
hypernetwork = HyperNetwork(function_representation, config["generator"]["latent_dim"],
config["generator"]["hypernet_layer_sizes"], non_linearity).to(device)
function_distribution = FunctionDistribution(hypernetwork).to(device)
# Setup discriminator
discriminator = PointConvDiscriminator(input_dim, output_dim, config["discriminator"]["layer_configs"],
linear_layer_sizes=config["discriminator"]["linear_layer_sizes"],
norm_order=config["discriminator"]["norm_order"],
add_sigmoid=True,
add_batchnorm=config["discriminator"]["add_batchnorm"],
add_weightnet_batchnorm=config["discriminator"]["add_weightnet_batchnorm"],
deterministic=config["discriminator"]["deterministic"],
same_coordinates=config["discriminator"]["same_coordinates"]).to(device)
print("\nFunction distribution")
print(hypernetwork)
print("Number of parameters: {}".format(count_parameters(hypernetwork)))
print("\nDiscriminator")
print(discriminator)
print("Number of parameters: {}".format(count_parameters(discriminator)))
# Setup trainer
trainer = Trainer(device, function_distribution, discriminator, data_converter,
lr=config["training"]["lr"], lr_disc=config["training"]["lr_disc"],
r1_weight=config["training"]["r1_weight"],
max_num_points=config["training"]["max_num_points"],
print_freq=config["training"]["print_freq"], save_dir=directory,
model_save_freq=config["training"]["model_save_freq"],
is_voxel=is_voxel, is_point_cloud=is_point_cloud,
is_era5=is_era5)
trainer.train(dataloader, config["training"]["epochs"])
| [
"torch.zeros",
"torch.nn.Identity",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.cuda.is_available"
] | 1.7.0 | EmilienDupont/neural-function-distributions | c034bf79640c6d8922f1c276174b3cb1800d22b4 |
0.4 | from __future__ import print_function
import argparse
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from torchvision import models
import horovod.torch as hvd
import timeit
import numpy as np
# Benchmark settings
parser = argparse.ArgumentParser(description='PyTorch Synthetic Benchmark',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--fp16-allreduce', action='store_true', default=False,
help='use fp16 compression during allreduce')
parser.add_argument('--model', type=str, default='resnet50',
help='model to benchmark')
parser.add_argument('--batch-size', type=int, default=32,
help='input batch size')
parser.add_argument('--num-warmup-batches', type=int, default=10,
help='number of warm-up batches that don\'t count towards benchmark')
parser.add_argument('--num-batches-per-iter', type=int, default=10,
help='number of batches per benchmark iteration')
parser.add_argument('--num-iters', type=int, default=10,
help='number of benchmark iterations')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--use-adasum', action='store_true', default=False,
help='use adasum algorithm to do reduction')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
hvd.init()
if args.cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
cudnn.benchmark = True
# Set up standard model.
model = getattr(models, args.model)()
# By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not args.use_adasum else 1
if args.cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if args.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
optimizer = optim.SGD(model.parameters(), lr=0.01 * lr_scaler)
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum if args.use_adasum else hvd.Average)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Set up fixed fake data
data = torch.randn(args.batch_size, 3, 224, 224)
target = torch.LongTensor(args.batch_size).random_() % 1000
if args.cuda:
data, target = data.cuda(), target.cuda()
def benchmark_step():
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
def log(s, nl=True):
if hvd.rank() != 0:
return
print(s, end='\n' if nl else '')
log('Model: %s' % args.model)
log('Batch size: %d' % args.batch_size)
device = 'GPU' if args.cuda else 'CPU'
log('Number of %ss: %d' % (device, hvd.size()))
# Warm-up
log('Running warmup...')
timeit.timeit(benchmark_step, number=args.num_warmup_batches)
# Benchmark
log('Running benchmark...')
img_secs = []
for x in range(args.num_iters):
time = timeit.timeit(benchmark_step, number=args.num_batches_per_iter)
img_sec = args.batch_size * args.num_batches_per_iter / time
log('Iter #%d: %.1f img/sec per %s' % (x, img_sec, device))
img_secs.append(img_sec)
# Results
img_sec_mean = np.mean(img_secs)
img_sec_conf = 1.96 * np.std(img_secs)
log('Img/sec per %s: %.1f +-%.1f' % (device, img_sec_mean, img_sec_conf))
log('Total img/sec on %d %s(s): %.1f +-%.1f' %
(hvd.size(), device, hvd.size() * img_sec_mean, hvd.size() * img_sec_conf))
| [
"torch.nn.functional.cross_entropy"
] | 0.4.0 | zmldndx/horovod | e9b1e228ff92eb7f65d9aea2d36f23b327df28bd |
1.9 | from typing import ClassVar, List, Optional, Tuple, Callable, Union, cast
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
from alphazero.network.distributions import SquashedNormal, GeneralizedBeta
from alphazero.network.utils import (
_map_nonlinearities,
_process_str,
)
__all__ = [
"make_policy",
"DiagonalNormalPolicy",
"DiagonalGMMPolicy",
"GeneralizedBetaPolicy",
"DiscretePolicy",
]
class Policy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.action_bound = action_bound
        # boundaries for the log standard deviation to increase training stability
self.log_param_min = log_param_min
self.log_param_max = log_param_max
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
components: int = getattr(self, "num_components", 1)
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, components={components}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, action_bounds={self.bounds}, "
f"log_std_bounds={self.log_param_bounds}, hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
@property
def bounds(self) -> np.ndarray:
if self.action_bound is None:
return np.array([-np.inf, np.inf], dtype=np.float32)
else:
return np.array([-self.action_bound, self.action_bound], dtype=np.float32)
@torch.no_grad()
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
raise NotImplementedError
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
raise NotImplementedError
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
x = self.trunk(x)
V_hat = self.value_head(x)
self.train()
return V_hat.detach().cpu().numpy()
class DiscretePolicy(nn.Module):
"""Base policy class.
The base policy is responsible for instanting the linear layers and value head.
It also defines some interface functions.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
- "beta", "generalizedbeta": Beta distribution with transformed support on (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
If True, the network is regularized with layer normalization after each liner layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log parameters.
log_param_max : int
Upper bound for learned log parameters.
"""
# member type annotations
state_dim: int
action_dim: int
num_actions: int
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
value_head: nn.Linear
# class variable
distribution_type: ClassVar[str] = "Categorical"
def __init__(
self,
representation_dim: int,
action_dim: int,
num_actions: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
):
super().__init__()
self.state_dim = representation_dim
self.action_dim = action_dim
self.num_actions = num_actions
assert hidden_dimensions, "Hidden dimensions can't be empty."
self.hidden_dimensions = hidden_dimensions
self.hidden_layers = len(hidden_dimensions)
self.distribution = D.Categorical
activation: Callable[..., nn.Module] = _map_nonlinearities(nonlinearity)
self.layernorm = layernorm
# generate neural network except distribution heads
layers = [
nn.Linear(self.state_dim, hidden_dimensions[0]),
activation(inplace=True),
]
if layernorm:
layers.append(nn.LayerNorm(normalized_shape=hidden_dimensions[0]))
if 1 < self.hidden_layers:
for i, hidden_dim in enumerate(hidden_dimensions[:-1]):
hid = [
nn.Linear(hidden_dim, hidden_dimensions[i + 1]),
activation(inplace=True),
]
if layernorm:
hid.append(nn.LayerNorm(normalized_shape=hidden_dimensions[i + 1]))
layers.extend(hid)
self.trunk = nn.Sequential(*layers)
self.value_head = nn.Linear(hidden_dimensions[-1], 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], num_actions)
def __repr__(self) -> str:
"""
Returns
-------
str
String representation of this instance.
"""
return (
f"class={type(self).__name__}, distribution={self.distribution_type}, num_actions={self.num_actions}, "
f"state_dim={self.state_dim}, action_dim={self.action_dim}, "
f"hidden_layers={self.hidden_layers}, hidden_units={self.hidden_dimensions}, "
f"nonlinearity={type(self.trunk[1]).__name__}, layernorm={self.layernorm}"
)
def _get_dist_params(
self, x: torch.Tensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
pi_logits = self.dist_head(x)
return pi_logits, V_hat
def forward(self, x: torch.FloatTensor) -> Tuple[D.Categorical, torch.FloatTensor]:
"""Forward pass of the model.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[Normallike, torch.FloatTensor]
Normal or squashed Normal distribution (dist), State value estimate (V_hat).
"""
pi_logits, V_hat = self._get_dist_params(x)
dist = D.Categorical(logits=pi_logits)
# samples from dist have shape [batch_size, action_dim]
return dist, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
pi_logits, V_hat = self._get_dist_params(states)
# This creates an independent distribution for each action possibility
# so that the batch_shape of the distribution is identical to the shape of actions
# It's needed so that the log_probs are of the proper shape [batch_size, num_actions]
# else this throws since the distribution's batch_shape=[batch_shape] doesn't match
# the shape of the actions tensor, which is [batch_size, num_actions]
num_actions = actions.shape[1]
pi_hat = D.Categorical(
logits=pi_logits.unsqueeze(dim=1).repeat((1, num_actions, 1))
)
log_probs = pi_hat.log_prob(actions)
entropy = pi_hat.entropy()
return log_probs, entropy, V_hat
@torch.no_grad()
def predict_V(self, x: torch.Tensor) -> np.ndarray:
self.eval()
_, V_hat = self(x)
self.train()
return V_hat.detach().cpu().numpy()
@torch.no_grad()
def predict_pi(self, x: torch.Tensor) -> np.ndarray:
self.eval()
logits, _ = self._get_dist_params(x)
self.train()
return F.softmax(logits, dim=-1).detach().cpu().numpy()
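# Usage sketch (added for illustration; the dimensions are arbitrary assumptions):
#   policy = DiscretePolicy(representation_dim=8, action_dim=1, num_actions=4,
#                           hidden_dimensions=[64, 64], nonlinearity="relu",
#                           layernorm=False)
#   dist, V_hat = policy(torch.randn(32, 8))  # Categorical over 4 actions, (32, 1) values
#   probs = policy.predict_pi(torch.randn(32, 8))  # numpy array of shape (32, 4)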
class DiagonalNormalPolicy(Policy):
"""Policy class for factorized normal distributions.
Learns parameters for a factorized normal distribution of types
Normal, TanhSquashedNormal or GeneralizedSquashedNormal.
Factorized means that a conditionally independent (given a state) 1D Normal distribution is
learned for each dimension of the action space instead of a Multivariate Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be a Normallike distribution.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviation.
log_param_max : int
Upper bound for learned log standard deviation.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalNormal"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# dist_head returns a tensor of shape [batch_size, 2*action_dim]
# split this tensor along the last dimension into parameters for mu and sigma
mu, log_std = self.dist_head(x).chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, V_hat = self(states)
# This aligns the distribution batch_shape with the number of actions at the root
# It can be thought of as generating num_actions identical normal distributions for each agent
# and then sampling the log_prob for action from the distribution
# num_actions = actions.shape[-1]
# mu = mu.expand((-1, num_actions))
# sigma = sigma.expand((-1, num_actions))
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
log_probs = normal.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, _ = self(x)
normal: Union[D.Normal, SquashedNormal]
if self.action_bound:
normal = SquashedNormal(mu, sigma, self.action_bound)
else:
normal = D.Normal(mu, sigma)
action = normal.sample()
self.train()
return action.detach().cpu().numpy()
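# Usage sketch (added for illustration; the dimensions are arbitrary assumptions):
#   policy = DiagonalNormalPolicy(representation_dim=8, action_dim=2,
#                                 action_bound=1.0, hidden_dimensions=[64],
#                                 nonlinearity="relu", layernorm=False,
#                                 log_param_min=-5, log_param_max=2)
#   action = policy.sample_action(torch.randn(1, 8))  # numpy array in (-1, 1)^2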
class DiagonalGMMPolicy(Policy):
"""Policy class for learning a factorized GMM.
Learns a 1D GMM for each dimension of the action space.
The components of the GMM are either Normal or squashed Normal.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
distribution : str
Distribution that is parameterized by the network. Has to be Normallike.
Allows the following options:
- "normal": Normal distribution.
- "tanhsquashed", "tanhsquashednormal": Normal distribution with samples squashed in (-1, 1).
- "generalizedsquashed", "generalizedsquashednormal": Normal distribution with samples squashed in (-c, c).
num_components : int
Number of mixture components.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log standard deviations.
log_param_max : int
Upper bound for learned log standard deviations.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: Optional[float]
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
num_components: int
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "DiagonalGMM"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: Optional[float],
num_components: int,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.num_components = num_components
# calculate the number of parameters needed for the GMM
        # the 2 comes from each component distribution being specified by 2 parameters
dist_params = num_components * (2 * self.action_dim + 1)
self.dist_head = nn.Linear(hidden_dimensions[-1], dist_params)
def forward(
self, x: torch.FloatTensor
) -> Tuple[
torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor
]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Distribution mean (mu), Distribution standard deviation (sigma),
Logits for the categorical distribution parameterizing the components (log_coeffs),
State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
        # mixture_params is a tensor of shape [batch_size, 2*action_dim*num_components + num_components]
# the elements in the first term (2*action_dim*num_components) are the parameters for the mixture components
# the elements in the second term (+ num_components) are the mixture coefficients
mixture_params = self.dist_head(x)
        # get the mixture component parameters and flatten to [batch_size, 2*num_components*action_dim]
dist_params = mixture_params[
..., : self.num_components * 2 * self.action_dim
].view(x.shape[0], -1)
# get the num_components last tensor elements as logits for the mixture coefficients
log_coeff = mixture_params[..., -self.num_components :]
# split the dist_params along the middle dimension (2*num_components) into means and log stddevs
mu, log_std = dist_params.chunk(2, dim=-1)
# Learning the log_std_dev is a trick for numerical stability
# Since the stddev > 0, we can learn the log and then exponentiate
# constrain log_std inside [log_param_min, log_param_max]
log_std = torch.clamp(log_std, min=self.log_param_min, max=self.log_param_max)
sigma = log_std.exp()
return mu, sigma, log_coeff, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
mu, sigma, log_coeff, V_hat = self(states)
# We need num_actions identical gmms to sample log_probs for each action
num_actions = actions.shape[-1]
mu = mu.unsqueeze(dim=1).expand((-1, num_actions, -1))
sigma = sigma.unsqueeze(dim=1).expand((-1, num_actions, -1))
log_coeff = log_coeff.unsqueeze(dim=1).expand((-1, num_actions, -1))
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
log_probs = gmm.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
mu, sigma, log_coeff, _ = self(x)
mix = D.Categorical(logits=log_coeff)
component: Union[D.Normal, SquashedNormal]
if self.action_bound:
component = SquashedNormal(mu, sigma, self.action_bound)
else:
component = D.Normal(mu, sigma)
gmm = D.MixtureSameFamily(mix, component)
action = gmm.sample()
self.train()
return action.detach().cpu().numpy()
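# Sizing note (added for illustration): the GMM head emits
# num_components * (2 * action_dim + 1) values per state, e.g. with
# num_components=3 and action_dim=1 that is 9 parameters
# (3 means, 3 log standard deviations, 3 mixture logits).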
class GeneralizedBetaPolicy(Policy):
"""Policy class for a generalized Beta distribution.
The beta distribution used by this class is generalized in that it has support
[-c, c] instead of [0,1].
This is achieved via a location-scale transformation (2c)x - c, where c are the desired bounds.
Since both parameters alpha, beta > 0, the log-learning-trick for the Normal standard deviation
is applied to both parameters.
Parameters
----------
representation_dim : int
Dimensions of the input representation.
action_dim : int
Number of dimensions for the action space.
action_bound : Optional[float]
Bounds for the action space. Can be either float or None.
hidden_dimensions : List[int]
Specify the number of hidden neurons for each respective hidden layer of the network. Cannot be empty.
nonlinearity : str
Nonlinearity used between hidden layers. Options are:
- "relu": https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html#torch.nn.ReLU .
- "leakyrelu": https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html#torch.nn.LeakyReLU.
- "relu6": https://pytorch.org/docs/stable/generated/torch.nn.ReLU6.html#torch.nn.ReLU6.
- "silu": https://pytorch.org/docs/stable/generated/torch.nn.SiLU.html#torch.nn.SiLU.
- "elu": https://pytorch.org/docs/stable/generated/torch.nn.ELU.html#torch.nn.ELU.
- "hardswish": https://pytorch.org/docs/stable/generated/torch.nn.Hardswish.html#torch.nn.Hardswish.
layernorm : bool
        If True, the network is regularized with layer normalization after each linear layer.
This may increase performance, see https://arxiv.org/pdf/1709.06560.pdf for info.
log_param_min : int
Lower bound for learned log_alpha and log_beta.
log_param_max : int
Upper bound for learned log_alpha and log_beta.
"""
# member annotations
state_dim: int
action_dim: int
action_bound: float
log_param_min: float
log_param_max: float
hidden_layers: int
hidden_dimensions: List[int]
trunk: nn.Sequential
dist_head: nn.Linear
value_head: nn.Linear
# class variable
policy_type: ClassVar[str] = "GeneralizedBeta"
def __init__(
self,
representation_dim: int,
action_dim: int,
action_bound: float,
hidden_dimensions: List[int],
nonlinearity: str,
layernorm: bool,
log_param_min: float,
log_param_max: float,
):
assert action_bound, "Beta policy needs action bounds specified."
super().__init__(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
self.dist_head = nn.Linear(hidden_dimensions[-1], 2 * self.action_dim)
def forward(
self, x: torch.FloatTensor
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
"""Returns the learned paremters of the distribution.
Parameters
----------
x : torch.FloatTensor
Input state tensor.
Returns
-------
Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]
Alpha parameter (alpha), Beta parameter (beta), State value estimate (V_hat).
"""
x = self.trunk(x)
V_hat = self.value_head(x)
# create distribution parameters
dist_params = self.dist_head(x)
# Use the log_std_dev trick for alpha and beta
# since both alpha > 0 and beta > 0
dist_params = torch.clamp(
dist_params, min=self.log_param_min, max=self.log_param_max
)
alpha, beta = dist_params.exp().chunk(2, dim=-1)
return alpha, beta, V_hat
def get_train_data(
self, states: torch.Tensor, actions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
alpha, beta, V_hat = self(states)
        # ensure that the distribution batch_shape fits the number of actions taken for
        # each agent at the root (the expand below assumes action_dim == 1 or an
        # already matching shape)
        num_actions = actions.shape[-1]
        alpha = alpha.expand(-1, num_actions)
        beta = beta.expand(-1, num_actions)
beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
log_probs = beta_dist.log_prob(actions)
entropy = -log_probs.mean(dim=-1)
return log_probs, entropy, V_hat
@torch.no_grad()
def sample_action(self, x: torch.Tensor) -> np.ndarray:
self.eval()
        alpha, beta, _ = self(x)
        # sample from the same generalized (location-scale transformed) Beta used
        # in get_train_data, so actions lie in [-action_bound, action_bound]
        beta_dist = GeneralizedBeta(alpha, beta, self.action_bound)
        action = beta_dist.sample()
self.train()
return action.detach().cpu().numpy()
def make_policy(
representation_dim: int,
action_dim: int,
distribution: str,
hidden_dimensions: List[int],
nonlinearity: str,
num_components: Optional[int] = None,
num_actions: Optional[int] = None,
action_bound: Optional[float] = None,
layernorm: bool = False,
log_param_min: float = -5,
log_param_max: float = 2,
) -> Union[
DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy
]:
"""Constructs a policy network from a given config.
The following config keys need to be specified:
- "representation_dim": int
- "action_dim": int
- "distribution": str
- "num_components": int
- "action_bound": float
- "hidden_dimensions": List[int]
- "nonlinearity": str
- "layernorm": bool
- "log_param_min": Optional[float]
- "log_param_max": Optional[float]
Parameters
----------
representation_dim: int
Dimensionality of the vector state space of the environment.
action_dim: int
Number of action dimensions in the environment.
distribution: str
Name of the policy distribution as string ["discrete", "beta", "normal"].
hidden_dimensions: List[int]
List specification of the MLP policy. Each int element in the list represents a hidden
layer in the network with the respective number of neurons.
nonlinearity: str
Nonlinearity (activation function) used in the policy network.
num_components: Optional[int] = None
Number of components for mixture distributions.
num_actions: Optional[int] = None
Number of available actions. Used in the discrete policy.
action_bound: Optional[float] = None
Action bounds for the squashed normal or squashed GMM policy.
layernorm: bool = False
Use Layernorm in the policy network if set to True.
log_param_min: float = -5
Lower bound of the learned log parameters (standard deviation for Normal distributions).
log_param_max: float = 2
Upper bound of the learned log parameters.
Returns
-------
Union[DiscretePolicy, DiagonalNormalPolicy, DiagonalGMMPolicy, GeneralizedBetaPolicy]
        Policy network instance.
"""
# basic config string preprocessing to ensure mapping works later
distribution = _process_str(distribution)
nonlinearity = _process_str(nonlinearity)
if distribution == "discrete":
return DiscretePolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_actions=cast(int, num_actions),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
)
elif distribution == "beta":
        assert action_bound is not None, "Beta policy requires action bounds."
return GeneralizedBetaPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=cast(float, action_bound),
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
assert num_components
if 1 < num_components:
return DiagonalGMMPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
num_components=num_components,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
else:
return DiagonalNormalPolicy(
representation_dim=representation_dim,
action_dim=action_dim,
action_bound=action_bound,
hidden_dimensions=hidden_dimensions,
nonlinearity=nonlinearity,
layernorm=layernorm,
log_param_min=log_param_min,
log_param_max=log_param_max,
)
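# Minimal usage sketch (all argument values are assumptions, not taken from a
# particular config):
#
#   policy = make_policy(
#       representation_dim=64,
#       action_dim=2,
#       distribution="normal",
#       hidden_dimensions=[128, 128],
#       nonlinearity="relu",
#       num_components=1,     # a single component selects DiagonalNormalPolicy
#       action_bound=1.0,     # actions squashed to [-1, 1]
#   )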
| [
"torch.nn.Linear",
"torch.distributions.Categorical",
"torch.nn.LayerNorm",
"torch.nn.Sequential",
"torch.no_grad",
"torch.distributions.Normal",
"torch.clamp",
"torch.distributions.Beta",
"torch.nn.functional.softmax",
"torch.distributions.MixtureSameFamily"
] | 1.9.0 | timoklein/A0C | 2825193f424bd5b74b654c929ef73775b0914ee5 |
1.6 | import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# import deepspeed
# import mpi4py
# import pandas
import torch
import transformers
import wandb
#%env WANDB_PROJECT=wine_gpt2_Trainer_42
MODEL_NAME = "gpt2-medium"
# wandb.login(anonymous='never', key="222a37baaf0c1b0d1499ec003e5c2fe49f97b107")
wandb.init()
# wandb.watch(log='all')
print(torch.cuda.is_available())
print(f"transformers version: {transformers.__version__}")
print(f"PyTorch version: {torch.__version__}")
# Tokenizers
tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL_NAME)
print(len(tokenizer))
tokenizer.add_special_tokens(
{"eos_token": "<|startoftext|>", "bos_token": "<|startoftext|>"}
)
tokenizer.add_tokens(
[
"[prompt]",
"[response]",
"[category_1]",
"[category_2]",
"[origin]",
"[description]",
"<|endoftext|>",
]
)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.save_pretrained("data/modeling/trainer_42/")
print(len(tokenizer))
print("Created tokenizer")
class wineDataset(torch.utils.data.Dataset):
def __init__(self, encodings):
self.encodings = encodings
def __len__(self):
return len(self.encodings["input_ids"])
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item["labels"] = item["input_ids"]
return item
with open("data/scraped/name_desc_nlp_ready_train.txt", "r", encoding="utf8") as file:
wines_raw_train = file.read().splitlines()
with open("data/scraped/name_desc_nlp_ready_test.txt", "r", encoding="utf8") as file:
wines_raw_test = file.read().splitlines()
print("Loaded dataset")
# wines_raw_train, wines_raw_test = train_test_split(wines_raw,test_size=0.2)
# wine_encodings_train = tokenizer(wines_raw_train, max_length=200, truncation=True, padding=True)
wine_encodings_test = tokenizer(
wines_raw_test, max_length=200, truncation=True, padding=True
)
print("Encoded dataset")
# wine_dataset_train = wineDataset(wine_encodings_train)
wine_dataset_test = wineDataset(wine_encodings_test)
print("Created PyTorch DataSet")
# train_loader = torch.utils.data.DataLoader(wine_dataset_train)
model = transformers.AutoModelForCausalLM.from_pretrained(MODEL_NAME)
# model.to('cuda')
model.resize_token_embeddings(len(tokenizer))
print(f"model parameters: {model.num_parameters():,}")
training_args = transformers.TrainingArguments(
output_dir="data/modeling/trainer_42/",
overwrite_output_dir=True,
num_train_epochs=1,
per_device_train_batch_size=2,
save_steps=100,
save_total_limit=2,
fp16=True,
# deepspeed='data/ds_config.json'
)
trainer = transformers.Trainer(
model=model, args=training_args, train_dataset=wine_dataset_test,
)
trainer.train()
| [
"torch.cuda.is_available",
"torch.tensor"
] | 1.6.0 | cipher982/Wine-o-matic | a8000bf5ec86554e9c3c746aae51ba509ab59162 |
1.7 | import torch
import torchvision
from torchvision import transforms, utils, datasets
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from sklearn.metrics import classification_report, confusion_matrix
def makeDataSet(IMAGE_SHAPE = 300,DATA_PATH = './data_after_splitting/'):
image_transforms = {
"train": transforms.Compose([
transforms.Resize((IMAGE_SHAPE, IMAGE_SHAPE)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
]),
"val": transforms.Compose([
transforms.Resize((IMAGE_SHAPE, IMAGE_SHAPE)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
])
}
train_dataset = datasets.ImageFolder(root = DATA_PATH + "train",
transform = image_transforms["train"]
)
val_dataset = datasets.ImageFolder(root = DATA_PATH + "val",
transform = image_transforms["val"]
)
train_dataloader = DataLoader(train_dataset, batch_size=4, num_workers=2, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=4, num_workers=2, shuffle=True)
return train_dataloader,val_dataloader
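# Example usage (the default DATA_PATH layout is assumed: DATA_PATH/train and
# DATA_PATH/val each contain one sub-folder per class, as ImageFolder expects):
# train_loader, val_loader = makeDataSet(IMAGE_SHAPE=300, DATA_PATH='./data_after_splitting/')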
| [
"torch.utils.data.DataLoader"
] | 1.7.1 | manhph2211/Pytorch-Fb-Classification | cf5f9c0b356635020ff245c255d971e450d203fb |
1.2 | """Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
from data import VOC_CLASSES as labelmap
import torch.utils.data as data
from ssd import build_ssd
import sys
import os
import time
import argparse
import numpy as np
import pickle
import cv2
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Evaluation')
parser.add_argument('--trained_model',
default='weights/ssd300_mAP_77.43_v2.pth', type=str,
help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.5, type=float,
help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=False, type=str2bool,
help='Use cuda to train model')
parser.add_argument('--voc_root', default=VOC_ROOT,
help='Location of VOC root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
help='Cleanup and remove results files following eval')
args = parser.parse_args()
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't using \
CUDA. Run with --cuda for optimal eval speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
annopath = os.path.join(args.voc_root, 'VOC2007', 'Annotations', '%s.xml')
imgpath = os.path.join(args.voc_root, 'VOC2007', 'JPEGImages', '%s.jpg')
if sys.platform.startswith("linux"):
imgsetpath = os.path.join(args.voc_root, 'VOC2007', 'ImageSets', 'Main', '{:s}.txt') # Linux 系统下
if sys.platform.startswith("win"):
imgsetpath = os.path.join(args.voc_root, 'VOC2007', 'ImageSets', 'Main', '{}.txt') # Linux 系统下
YEAR = '2007'
devkit_path = args.voc_root + 'VOC' + YEAR
dataset_mean = (104, 117, 123)
set_type = 'test'
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
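# Typical usage: t = Timer(); t.tic(); ...; elapsed = t.toc(average=False)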
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
int(bbox.find('ymin').text) - 1,
int(bbox.find('xmax').text) - 1,
int(bbox.find('ymax').text) - 1]
objects.append(obj_struct)
return objects
def get_output_dir(name, phase):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
filedir = os.path.join(name, phase)
if not os.path.exists(filedir):
os.makedirs(filedir)
return filedir
def get_voc_results_file_template(image_set, cls):
# VOCdevkit/VOC2007/results/det_test_aeroplane.txt
filename = 'det_' + image_set + '_%s.txt' % (cls)
filedir = os.path.join(devkit_path, 'results')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def write_voc_results_file(all_boxes, dataset):
for cls_ind, cls in enumerate(labelmap):
print('Writing {:s} VOC results file'.format(cls))
filename = get_voc_results_file_template(set_type, cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(dataset.ids):
dets = all_boxes[cls_ind+1][im_ind]
                if len(dets) == 0:
                    continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index[1], dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def do_python_eval(output_dir='output', use_07=True):
cachedir = os.path.join(devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = use_07
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(labelmap):
filename = get_voc_results_file_template(set_type, cls)
rec, prec, ap = voc_eval(
filename, annopath, imgsetpath.format(set_type), cls, cachedir,
ovthresh=0.5, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('--------------------------------------------------------------')
def voc_ap(rec, prec, use_07_metric=True):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:True).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
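# Worked example for the 11-point metric: with a single detection giving
# rec = [0.5] and prec = [1.0], the max precision is 1.0 for thresholds
# t in {0.0, 0.1, ..., 0.5} and 0 elsewhere, so AP = 6 / 11 ~= 0.545.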
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=True):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default True)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath % (imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
if any(lines) == 1:
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
(BBGT[:, 2] - BBGT[:, 0]) *
(BBGT[:, 3] - BBGT[:, 1]) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
else:
rec = -1.
prec = -1.
ap = -1.
return rec, prec, ap
def test_net(save_folder, net, cuda, dataset, transform, top_k,
im_size=300, thresh=0.05):
num_images = len(dataset)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(len(labelmap)+1)]
# timers
_t = {'im_detect': Timer(), 'misc': Timer()}
output_dir = get_output_dir('ssd300_120000', set_type)
det_file = os.path.join(output_dir, 'detections.pkl')
for i in range(num_images):
im, gt, h, w = dataset.pull_item(i)
x = Variable(im.unsqueeze(0))
if args.cuda:
x = x.cuda()
_t['im_detect'].tic()
detections = net(x).data
detect_time = _t['im_detect'].toc(average=False)
# skip j = 0, because it's the background class
for j in range(1, detections.size(1)):
dets = detections[0, j, :]
mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
dets = torch.masked_select(dets, mask).view(-1, 5)
if dets.size(0) == 0:
continue
boxes = dets[:, 1:]
boxes[:, 0] *= w
boxes[:, 2] *= w
boxes[:, 1] *= h
boxes[:, 3] *= h
scores = dets[:, 0].cpu().numpy()
cls_dets = np.hstack((boxes.cpu().numpy(),
scores[:, np.newaxis])).astype(np.float32,
copy=False)
all_boxes[j][i] = cls_dets
print('im_detect: {:d}/{:d} {:.3f}s'.format(i + 1,
num_images, detect_time))
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
evaluate_detections(all_boxes, output_dir, dataset)
def evaluate_detections(box_list, output_dir, dataset):
write_voc_results_file(box_list, dataset)
do_python_eval(output_dir)
if __name__ == '__main__':
# load net
num_classes = len(labelmap) + 1 # +1 for background
net = build_ssd('test', 300, num_classes) # initialize SSD
#net.load_state_dict(torch.load(args.trained_model))
net.load_state_dict(torch.load(args.trained_model, map_location='cpu')) # running on a CPU-only machine
net.eval()
print('Finished loading model!')
# load data
dataset = VOCDetection(args.voc_root,
[('2007', set_type)],
BaseTransform(300, dataset_mean),
VOCAnnotationTransform())
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
# evaluation
test_net(args.save_folder, net, args.cuda, dataset,
BaseTransform(net.size, dataset_mean), args.top_k, 300,
thresh=args.confidence_threshold)
| [
"torch.cuda.is_available",
"torch.load",
"torch.masked_select",
"torch.set_default_tensor_type"
] | 1.2 | FLyingLSJ/ssd.pytorch | 9caca0788f0bebab345f969a7d3c1f8b2081b809 |
1.3 | import os
import sys
import errno
import random
import pickle
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
from torch.utils.data.dataset import Dataset
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import BatchSampler
from torchvision.datasets import DatasetFolder
from torchvision import transforms
from torch import nn
from torch import optim
import matplotlib.pyplot as plt
#==============================================================================
# Network definition
#==============================================================================
class SE_HIPP_3D_Net(nn.Module):
def __init__(self):
super(SE_HIPP_3D_Net, self).__init__()
self.conv1 = nn.Conv2d(28, 32, kernel_size=4, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=2, stride=1, padding=0)
self.bn2 = nn.BatchNorm2d(64)
self.fc1 = nn.Linear(64*7*7, 120)
self.dropout = nn.Dropout(0.5)
self.fc2 = nn.Linear(120, 2)
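        # Shape sketch, assuming 28x28 spatial inputs (x of shape (N, 28, 28, 28)):
        #   conv1 (k=4, s=1, p=1): 28 -> 27, then max_pool (k=3, s=2): 27 -> 13
        #   conv2 (k=2, s=1, p=0): 13 -> 12, then max_pool (k=2, s=2, p=1): 12 -> 7
        # which matches fc1's expected input size of 64 * 7 * 7.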
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=0)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1)
x = self.bn2(x)
x = self.relu(x)
# print("size", x.size())
x = x.view(-1, self.num_flat_features(x))
x = self.dropout(x)
# print("size", x.size())
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features | [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.functional.max_pool2d"
] | 1.3.1 | kaderghal/ADNI_Data_processing | 454462d3913d77e3bc4de2b9725b456301c7b351 |
3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import multiprocessing as mp
import numpy as np
import os
import torch
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.logger import setup_logger
from pytorch3d.io import save_obj
from pytorch3d.structures import Meshes
# required so that .register() calls are executed in module scope
import meshrcnn.data # noqa
import meshrcnn.modeling # noqa
import meshrcnn.utils # noqa
from meshrcnn.config import get_meshrcnn_cfg_defaults
from meshrcnn.evaluation import transform_meshes_to_camera_coord_system
def get_parser():
parser = argparse.ArgumentParser(description="MeshRCNN Demo")
parser.add_argument(
"--config-file",
default="configs/pix3d/meshrcnn_R50_FPN.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--input", help="A path to an input image")
parser.add_argument("--output", help="A directory to save output visualizations")
parser.add_argument(
"--focal-length", type=float, default=20.0, help="Focal length for the image"
)
parser.add_argument(
"--onlyhighest", action="store_true", help="will return only the highest scoring detection"
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
args = get_parser().parse_args()
from meshrcnn.data.datasets.register_pix3d import register_pix3d
register_pix3d(args.opts[1])
import cv2
logger = logging.getLogger("demo")
class VisualizationDemo(object):
def __init__(self, cfg, vis_highest_scoring=True, output_dir="./vis"):
"""
Args:
cfg (CfgNode):
vis_highest_scoring (bool): If set to True visualizes only
the highest scoring prediction
"""
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
self.colors = self.metadata.thing_colors
self.cat_names = self.metadata.thing_classes
self.cpu_device = torch.device("cpu")
self.vis_highest_scoring = vis_highest_scoring
self.predictor = DefaultPredictor(cfg)
os.makedirs(output_dir, exist_ok=True)
self.output_dir = output_dir
def run_on_image(self, image, focal_length=10.0):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
focal_length (float): the focal_length of the image
Returns:
predictions (dict): the output of the model.
"""
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
# camera matrix
imsize = [image.shape[0], image.shape[1]]
# focal <- focal * image_width / 32
focal_length = image.shape[1] / 32 * focal_length
        K = [focal_length, image.shape[1] / 2, image.shape[0] / 2]  # intrinsics [f, cx, cy]
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
scores = instances.scores
boxes = instances.pred_boxes
labels = instances.pred_classes
masks = instances.pred_masks
meshes = Meshes(
verts=[mesh[0] for mesh in instances.pred_meshes],
faces=[mesh[1] for mesh in instances.pred_meshes],
)
pred_dz = instances.pred_dz[:, 0] * (boxes.tensor[:, 3] - boxes.tensor[:, 1])
tc = pred_dz.abs().max() + 1.0
zranges = torch.stack(
[
torch.stack(
[
tc - tc * pred_dz[i] / 2.0 / focal_length,
tc + tc * pred_dz[i] / 2.0 / focal_length,
]
)
for i in range(len(meshes))
],
dim=0,
)
Ks = torch.tensor(K).to(self.cpu_device).view(1, 3).expand(len(meshes), 3)
meshes = transform_meshes_to_camera_coord_system(
meshes, boxes.tensor, zranges, Ks, imsize
)
if self.vis_highest_scoring:
det_ids = [scores.argmax().item()]
else:
det_ids = range(len(scores))
for det_id in det_ids:
self.visualize_prediction(
det_id,
image,
boxes.tensor[det_id],
labels[det_id],
scores[det_id],
masks[det_id],
meshes[det_id],
)
return predictions
def visualize_prediction(
self, det_id, image, box, label, score, mask, mesh, alpha=0.6, dpi=200
):
mask_color = np.array(self.colors[label], dtype=np.float32)
cat_name = self.cat_names[label]
thickness = max([int(np.ceil(0.001 * image.shape[0])), 1])
box_color = (0, 255, 0) # '#00ff00', green
text_color = (218, 227, 218) # gray
composite = image.copy().astype(np.float32)
# overlay mask
idx = mask.nonzero()
composite[idx[:, 0], idx[:, 1], :] *= 1.0 - alpha
composite[idx[:, 0], idx[:, 1], :] += alpha * mask_color
# overlay box
(x0, y0, x1, y1) = (int(x + 0.5) for x in box)
composite = cv2.rectangle(
composite, (x0, y0), (x1, y1), color=box_color, thickness=thickness
)
composite = composite.astype(np.uint8)
# overlay text
font_scale = 0.001 * image.shape[0]
font_thickness = thickness
font = cv2.FONT_HERSHEY_TRIPLEX
text = "%s %.3f" % (cat_name, score)
((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, font_thickness)
# Place text background.
if x0 + text_w > composite.shape[1]:
x0 = composite.shape[1] - text_w
if y0 - int(1.2 * text_h) < 0:
y0 = int(1.2 * text_h)
back_topleft = x0, y0 - int(1.3 * text_h)
back_bottomright = x0 + text_w, y0
cv2.rectangle(composite, back_topleft, back_bottomright, box_color, -1)
# Show text
text_bottomleft = x0, y0 - int(0.2 * text_h)
cv2.putText(
composite,
text,
text_bottomleft,
font,
font_scale,
text_color,
thickness=font_thickness,
lineType=cv2.LINE_AA,
)
save_file = os.path.join(self.output_dir, "%d_mask_%s_%.3f.png" % (det_id, cat_name, score))
cv2.imwrite(save_file, composite[:, :, ::-1])
save_file = os.path.join(self.output_dir, "%d_mesh_%s_%.3f.obj" % (det_id, cat_name, score))
verts, faces = mesh.get_mesh_verts_faces(0)
save_obj(save_file, verts, faces)
def setup_cfg(args):
cfg = get_cfg()
get_meshrcnn_cfg_defaults(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
logger = setup_logger(name="demo")
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
im_name = args.input.split("/")[-1].split(".")[0]
demo = VisualizationDemo(
cfg, vis_highest_scoring=args.onlyhighest, output_dir=os.path.join(args.output, im_name)
)
# use PIL, to be consistent with evaluation
img = read_image(args.input, format="BGR")
predictions = demo.run_on_image(img, focal_length=args.focal_length)
logger.info("Predictions saved in %s" % (os.path.join(args.output, im_name)))
| [
"torch.device",
"torch.stack",
"torch.tensor"
] | 3 | ishanic/MeshRCNN-keypoints | fdc2c81ce57313207478ab9ff1699614addc5993 |
1.6 | from math import log, exp
from numpy import inf, zeros, zeros_like as np_zeros_like, arange, asarray, empty
from pandas import concat
from anndata import AnnData
from torch import cat, no_grad, randn, zeros_like, zeros as torch_zeros, ones, argmax
from torch.nn import Module, Linear, Sequential, RNNCell, Softplus, Parameter, Softmax
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR
from .Layers import Input_Block, FF_Block, LambdaLayer, Dual_Forward
class sciPENN_Model(Module):
def __init__(self, p_mod1, p_mod2, loss1, loss2, quantiles, categories):
super(sciPENN_Model, self).__init__()
h_size, drop_rate = 512, 0.25
self.RNNCell = RNNCell(h_size, h_size)
self.input_block = Input_Block(p_mod1, h_size, drop_rate, drop_rate)
self.skip_1 = FF_Block(h_size, drop_rate)
self.skip_2 = FF_Block(h_size, drop_rate)
self.skip_3 = FF_Block(h_size, drop_rate)
MSE_output = Linear(h_size, p_mod2)
if len(quantiles) > 0:
quantile_layer = []
quantile_layer.append(Linear(h_size, p_mod2 * len(quantiles)))
quantile_layer.append(LambdaLayer(lambda x: x.view(-1, p_mod2, len(quantiles))))
quantile_layer = Sequential(*quantile_layer)
self.mod2_out = Dual_Forward(MSE_output, quantile_layer)
else:
self.mod2_out = MSE_output
if categories is not None:
self.celltype_out = Sequential(Linear(h_size, len(categories)), Softmax(1))
self.forward = self.forward_transfer
self.categories_arr = empty((len(categories), ), dtype = 'object')
for cat in categories:
self.categories_arr[categories[cat]] = cat
else:
self.forward = self.forward_simple
self.categories_arr = None
self.quantiles = quantiles
self.loss1, self.loss2 = loss1, loss2
def forward_transfer(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': self.celltype_out(h.detach()), 'modality 2': self.mod2_out(h), 'embedding': h}
def forward_simple(self, x):
x = self.input_block(x)
h = self.RNNCell(x, zeros_like(x))
x = self.skip_1(x)
h = self.RNNCell(x, h)
x = self.skip_2(x)
h = self.RNNCell(x, h)
x = self.skip_3(x)
h = self.RNNCell(x, h)
return {'celltypes': None, 'modality 2': self.mod2_out(h), 'embedding': h}
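    # Note: forward_transfer and forward_simple run the same trunk/RNNCell
    # recurrence; forward_transfer additionally predicts cell types from the
    # (detached) hidden state.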
def train_backprop(self, train_loader, val_loader,
n_epoch = 10000, ES_max = 30, decay_max = 10, decay_step = 0.1, lr = 10**(-3)):
optimizer = Adam(self.parameters(), lr = lr)
scheduler = StepLR(optimizer, step_size = 1, gamma = decay_step)
patience = 0
bestloss = inf
if self.categories_arr is None:
get_correct = lambda x: 0
else:
get_correct = lambda outputs: (argmax(outputs['celltypes'], axis = 1) == celltypes).sum()
for epoch in range(n_epoch):
with no_grad():
running_loss, rtype_acc = 0., 0.
self.eval()
for batch, inputs in enumerate(val_loader):
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
n_correct = get_correct(outputs)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
rtype_acc += n_correct
running_loss += mod2_loss.item() * len(mod2)
if self.categories_arr is None:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}")
else:
print(f"Epoch {epoch} prediction loss = {running_loss/len(val_loader):.3f}, validation accuracy = {rtype_acc/len(val_loader):.3f}")
patience += 1
if bestloss/1.005 > running_loss:
bestloss, patience = running_loss, 0
if (patience + 1) % decay_max == 0:
scheduler.step()
print(f"Decaying loss to {optimizer.param_groups[0]['lr']}")
if (patience + 1) > ES_max:
break
self.train()
for batch, inputs in enumerate(train_loader):
optimizer.zero_grad()
mod1, mod2, protein_bools, celltypes = inputs
outputs = self(mod1)
mod1_loss = self.loss1(outputs['celltypes'], celltypes)
mod2_loss = self.loss2(outputs['modality 2'], mod2, protein_bools)
loss = mod1_loss + mod2_loss
loss.backward()
optimizer.step()
def impute(self, impute_loader, requested_quantiles, denoise_genes, proteins):
imputed_test = proteins.copy()
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1, bools, celltypes in impute_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = self.fill_predicted(imputed_test.X[start:end], mod2_impute, bools)
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
return imputed_test
def embed(self, impute_loader, test_loader, cells_train, cells_test):
if cells_test is not None:
embedding = AnnData(zeros(shape = (len(cells_train) + len(cells_test), 512)))
embedding.obs = concat((cells_train, cells_test), join = 'inner')
else:
embedding = AnnData(zeros(shape = (len(cells_train), 512)))
embedding.obs = cells_train
self.eval()
start = 0
for mod1, bools, celltypes in impute_loader:
end = start + mod1.shape[0]
outputs = self(mod1)
embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
start = end
if cells_test is not None:
for mod1 in test_loader:
end = start + mod1.shape[0]
outputs = self(mod1)
embedding[start:end] = outputs['embedding'].detach().cpu().numpy()
start = end
return embedding
    def fill_predicted(self, array, predicted, bools):
        # keep measured protein values (bools == 1) and fill the remaining
        # entries (bools == 0, assumed zero in `array`) with the predictions
        bools = bools.cpu().numpy()
        return (1. - bools) * predicted.cpu().numpy() + array
def predict(self, test_loader, requested_quantiles, denoise_genes, proteins, cells):
imputed_test = AnnData(zeros(shape = (len(cells), len(proteins.var))))
imputed_test.obs = cells
imputed_test.var.index = proteins.var.index
if self.categories_arr is not None:
celltypes = ['None'] * len(cells)
for quantile in requested_quantiles:
imputed_test.layers['q' + str(round(100 * quantile))] = np_zeros_like(imputed_test.X)
self.eval()
start = 0
for mod1 in test_loader:
end = start + mod1.shape[0]
with no_grad():
outputs = self(mod1)
if self.categories_arr is not None:
predicted_types = argmax(outputs['celltypes'], axis = 1).cpu().numpy()
celltypes[start:end] = self.categories_arr[predicted_types].tolist()
if len(self.quantiles) > 0:
mod2_impute, mod2_quantile = outputs['modality 2']
else:
mod2_impute = outputs['modality 2']
imputed_test.X[start:end] = mod2_impute.cpu().numpy()
for quantile in requested_quantiles:
index = [i for i, q in enumerate(self.quantiles) if quantile == q][0]
q_name = 'q' + str(round(100 * quantile))
imputed_test.layers[q_name][start:end] = mod2_quantile[:, : , index].cpu().numpy()
start = end
if self.categories_arr is not None:
imputed_test.obs['transfered cell labels'] = celltypes
return imputed_test | [
"torch.nn.Linear",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.RNNCell",
"torch.nn.Softmax",
"torch.nn.Sequential",
"torch.no_grad",
"torch.zeros_like",
"torch.argmax"
] | 1.6.1 | jlakkis/sciPENN | 34afb2008a076e13c40965a76d3dd31d0c331652 |
0.4 | # MIT License
# Copyright (c) 2018 the NJUNMT-pytorch authors.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import random
import time
from copy import deepcopy
import numpy as np
import torch
import yaml
from tensorboardX import SummaryWriter
from tqdm import tqdm
from src.data.data_iterator import DataIterator
from src.data.dataset import TextLineDataset, ZipDataset
from src.data.vocabulary import Vocabulary
from src.decoding import beam_search, ensemble_beam_search
from src.decoding.beam_search import nmt_lm_fusion_beam_search
from src.metric.bleu_scorer import SacreBLEUScorer
from src.models import build_model
from src.modules.criterions import NMTCriterion
from src.optim import Optimizer
from src.optim.lr_scheduler import ReduceOnPlateauScheduler, NoamScheduler, RsqrtScheduler
from src.utils.common_utils import *
from src.utils.configs import default_configs, pretty_configs
from src.utils.logging import *
from src.utils.moving_average import MovingAverage
BOS = Vocabulary.BOS
EOS = Vocabulary.EOS
PAD = Vocabulary.PAD
def set_seed(seed):
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
def load_model_parameters(path, map_location="cpu"):
state_dict = torch.load(path, map_location=map_location)
if "model" in state_dict:
return state_dict["model"]
return state_dict
def split_shard(*inputs, split_size=1):
if split_size <= 1:
yield inputs
else:
        lengths = [len(s) for s in inputs[-1]]  # lengths of the last input field
sorted_indices = np.argsort(lengths)
# sorting inputs
inputs = [
[inp[ii] for ii in sorted_indices]
for inp in inputs
]
# split shards
total_batch = sorted_indices.shape[0] # total number of batches
if split_size >= total_batch:
yield inputs
else:
shard_size = total_batch // split_size
_indices = list(range(total_batch))[::shard_size] + [total_batch]
for beg, end in zip(_indices[:-1], _indices[1:]):
yield (inp[beg:end] for inp in inputs)
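# Example: with 5 samples and split_size=2, shard_size = 5 // 2 = 2 and
# _indices = [0, 2, 4, 5], so the shards cover slices [0:2], [2:4] and [4:5].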
def prepare_data(seqs_x, seqs_y=None, cuda=False, batch_first=True):
"""
Args:
eval ('bool'): indicator for eval/infer.
Returns:
"""
def _np_pad_batch_2D(samples, pad, batch_first=True, cuda=True):
batch_size = len(samples)
sizes = [len(s) for s in samples]
max_size = max(sizes)
x_np = np.full((batch_size, max_size), fill_value=pad, dtype='int64')
for ii in range(batch_size):
x_np[ii, :sizes[ii]] = samples[ii]
if batch_first is False:
x_np = np.transpose(x_np, [1, 0])
x = torch.tensor(x_np)
if cuda is True:
x = x.cuda()
return x
seqs_x = list(map(lambda s: [BOS] + s + [EOS], seqs_x))
x = _np_pad_batch_2D(samples=seqs_x, pad=PAD,
cuda=cuda, batch_first=batch_first)
if seqs_y is None:
return x
seqs_y = list(map(lambda s: [BOS] + s + [EOS], seqs_y))
y = _np_pad_batch_2D(seqs_y, pad=PAD,
cuda=cuda, batch_first=batch_first)
return x, y
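# For instance (using the BOS/EOS/PAD constants above): seqs_x = [[5, 6], [7]]
# with batch_first=True yields x = [[BOS, 5, 6, EOS], [BOS, 7, EOS, PAD]].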
def compute_forward(model,
critic,
seqs_x,
eval=False,
normalization=1.0,
norm_by_words=False
):
"""
:type model: nn.Module
:type critic: NMTCriterion
"""
x_inp = seqs_x[:, :-1].contiguous()
x_label = seqs_x[:, 1:].contiguous()
words_norm = x_label.ne(PAD).float().sum(1)
if not eval:
model.train()
critic.train()
# For training
with torch.enable_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, reduce=False,
normalization=normalization)
if norm_by_words:
loss = loss.div(words_norm).sum()
else:
loss = loss.sum()
torch.autograd.backward(loss)
return loss.item()
else:
model.eval()
critic.eval()
# For compute loss
with torch.no_grad():
log_probs = model(x_inp)
loss = critic(inputs=log_probs, labels=x_label, normalization=normalization, reduce=True)
return loss.item()
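# The slicing of seqs_x above implements next-token prediction: for a sequence
# [BOS, 5, 6, EOS], x_inp = [BOS, 5, 6] and x_label = [5, 6, EOS].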
def loss_validation(model, critic, valid_iterator):
"""
:type model: Transformer
:type critic: NMTCriterion
:type valid_iterator: DataIterator
"""
n_sents = 0
n_tokens = 0.0
sum_loss = 0.0
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
_, seqs_x = batch
n_sents += len(seqs_x)
n_tokens += sum(len(s) for s in seqs_x)
x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=model,
critic=critic,
seqs_x=x,
eval=True)
if np.isnan(loss):
WARN("NaN detected!")
sum_loss += float(loss)
return float(sum_loss / n_sents)
def bleu_validation(uidx,
valid_iterator,
model,
bleu_scorer,
vocab_tgt,
batch_size,
valid_dir="./valid",
max_steps=10,
beam_size=5,
alpha=-1.0
):
model.eval()
numbers = []
trans = []
infer_progress_bar = tqdm(total=len(valid_iterator),
desc=' - (Infer) ',
unit="sents")
valid_iter = valid_iterator.build_generator(batch_size=batch_size)
for batch in valid_iter:
seq_nums = batch[0]
numbers += seq_nums
seqs_x = batch[1]
infer_progress_bar.update(len(seqs_x))
x = prepare_data(seqs_x, cuda=GlobalNames.USE_GPU)
with torch.no_grad():
word_ids = beam_search(nmt_model=model,
beam_size=beam_size,
max_steps=max_steps,
src_seqs=x, alpha=alpha)
word_ids = word_ids.cpu().numpy().tolist()
# Append result
for sent_t in word_ids:
sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
x_tokens = []
for wid in sent_t[0]:
if wid == EOS:
break
x_tokens.append(vocab_tgt.id2token(wid))
if len(x_tokens) > 0:
trans.append(vocab_tgt.tokenizer.detokenize(x_tokens))
else:
trans.append('%s' % vocab_tgt.id2token(EOS))
origin_order = np.argsort(numbers).tolist()
trans = [trans[ii] for ii in origin_order]
infer_progress_bar.close()
if not os.path.exists(valid_dir):
os.mkdir(valid_dir)
hyp_path = os.path.join(valid_dir, 'trans.iter{0}.txt'.format(uidx))
with open(hyp_path, 'w') as f:
for line in trans:
f.write('%s\n' % line)
with open(hyp_path) as f:
bleu_v = bleu_scorer.corpus_bleu(f)
return bleu_v
def load_pretrained_model(nmt_model, pretrain_path, device, exclude_prefix=None):
"""
    Args:
        nmt_model: model.
        pretrain_path ('str'): path to pretrained model.
        device: map_location used when loading the pretrained parameters.
        exclude_prefix ('list'): name prefixes of parameters to exclude
            from loading.
Raises:
ValueError: Size not match, parameter name not match or others.
"""
if exclude_prefix is None:
exclude_prefix = []
if pretrain_path != "":
INFO("Loading pretrained model from {}".format(pretrain_path))
pretrain_params = torch.load(pretrain_path, map_location=device)
for name, params in pretrain_params.items():
flag = False
for pp in exclude_prefix:
if name.startswith(pp):
flag = True
break
if flag:
continue
INFO("Loading param: {}...".format(name))
try:
nmt_model.load_state_dict({name: params}, strict=False)
except Exception as e:
WARN("{}: {}".format(str(Exception), e))
INFO("Pretrained model loaded.")
def train(FLAGS):
"""
FLAGS:
saveto: str
reload: store_true
config_path: str
pretrain_path: str, default=""
model_name: str
log_path: str
"""
# write log of training to file.
write_log_to_file(os.path.join(FLAGS.log_path, "%s.log" % time.strftime("%Y%m%d-%H%M%S")))
GlobalNames.USE_GPU = FLAGS.use_gpu
    if GlobalNames.USE_GPU:
        CURRENT_DEVICE = "cuda:0"
    else:
        CURRENT_DEVICE = "cpu"
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
configs = yaml.load(f)
INFO(pretty_configs(configs))
# Add default configs
configs = default_configs(configs)
data_configs = configs['data_configs']
model_configs = configs['model_configs']
optimizer_configs = configs['optimizer_configs']
training_configs = configs['training_configs']
GlobalNames.SEED = training_configs['seed']
set_seed(GlobalNames.SEED)
best_model_prefix = os.path.join(FLAGS.saveto, FLAGS.model_name + GlobalNames.MY_BEST_MODEL_SUFFIX)
timer = Timer()
# ================================================================================== #
# Load Data
INFO('Loading data...')
timer.tic()
# Generate target dictionary
vocab_src = Vocabulary(**data_configs["vocabularies"][0])
train_batch_size = training_configs["batch_size"] * max(1, training_configs["update_cycle"])
train_buffer_size = training_configs["buffer_size"] * max(1, training_configs["update_cycle"])
train_bitext_dataset = ZipDataset(
TextLineDataset(data_path=data_configs['train_data'][0],
vocabulary=vocab_src,
max_len=data_configs['max_len'][0],
),
shuffle=training_configs['shuffle']
)
valid_bitext_dataset = ZipDataset(
TextLineDataset(data_path=data_configs['valid_data'][0],
vocabulary=vocab_src,
),
)
training_iterator = DataIterator(dataset=train_bitext_dataset,
batch_size=train_batch_size,
use_bucket=training_configs['use_bucket'],
buffer_size=train_buffer_size,
batching_func=training_configs['batching_key'])
valid_iterator = DataIterator(dataset=valid_bitext_dataset,
batch_size=training_configs['valid_batch_size'],
use_bucket=True, buffer_size=100000, numbering=True)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
lrate = optimizer_configs['learning_rate']
is_early_stop = False
# ================================ Begin ======================================== #
# Build Model & Optimizer
# We would do steps below on after another
# 1. build models & criterion
# 2. move models & criterion to gpu if needed
# 3. load pre-trained model if needed
# 4. build optimizer
# 5. build learning rate scheduler if needed
# 6. load checkpoints if needed
# 0. Initial
model_collections = Collections()
checkpoint_saver = Saver(save_prefix="{0}.ckpt".format(os.path.join(FLAGS.saveto, FLAGS.model_name)),
num_max_keeping=training_configs['num_kept_checkpoints']
)
best_model_saver = BestKSaver(save_prefix="{0}.best".format(os.path.join(FLAGS.saveto, FLAGS.model_name)),
num_max_keeping=training_configs["num_kept_best_checkpoints"])
# 1. Build Model & Criterion
INFO('Building model...')
timer.tic()
nmt_model = build_model(n_words=vocab_src.max_n_words, **model_configs)
INFO(nmt_model)
params_total = sum([p.numel() for n, p in nmt_model.named_parameters()])
params_with_embedding = sum([p.numel() for n, p in nmt_model.named_parameters() if n.find('embedding') == -1])
INFO('Total parameters: {}'.format(params_total))
INFO('Total parameters (excluding word embeddings): {}'.format(params_with_embedding))
critic = NMTCriterion(label_smoothing=model_configs['label_smoothing'])
INFO(critic)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# 2. Move to GPU
if GlobalNames.USE_GPU:
nmt_model = nmt_model.cuda()
critic = critic.cuda()
# 3. Load pretrained model if needed
load_pretrained_model(nmt_model, FLAGS.pretrain_path, exclude_prefix=None, device=CURRENT_DEVICE)
# 4. Build optimizer
INFO('Building Optimizer...')
optim = Optimizer(name=optimizer_configs['optimizer'],
model=nmt_model,
lr=lrate,
grad_clip=optimizer_configs['grad_clip'],
optim_args=optimizer_configs['optimizer_params']
)
# 5. Build scheduler for optimizer if needed
if optimizer_configs['schedule_method'] is not None:
if optimizer_configs['schedule_method'] == "loss":
scheduler = ReduceOnPlateauScheduler(optimizer=optim,
**optimizer_configs["scheduler_configs"]
)
elif optimizer_configs['schedule_method'] == "noam":
scheduler = NoamScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
elif optimizer_configs["schedule_method"] == "rsqrt":
scheduler = RsqrtScheduler(optimizer=optim, **optimizer_configs['scheduler_configs'])
else:
WARN("Unknown scheduler name {0}. Do not use lr_scheduling.".format(optimizer_configs['schedule_method']))
scheduler = None
else:
scheduler = None
# 6. build moving average
if training_configs['moving_average_method'] is not None:
ma = MovingAverage(moving_average_method=training_configs['moving_average_method'],
named_params=nmt_model.named_parameters(),
alpha=training_configs['moving_average_alpha'])
else:
ma = None
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# Reload from latest checkpoint
if FLAGS.reload:
checkpoint_saver.load_latest(model=nmt_model, optim=optim, lr_scheduler=scheduler,
collections=model_collections, ma=ma)
# ================================================================================== #
# Prepare training
eidx = model_collections.get_collection("eidx", [0])[-1]
uidx = model_collections.get_collection("uidx", [0])[-1]
bad_count = model_collections.get_collection("bad_count", [0])[-1]
oom_count = model_collections.get_collection("oom_count", [0])[-1]
summary_writer = SummaryWriter(log_dir=FLAGS.log_path)
cum_samples = 0
cum_words = 0
valid_loss = best_valid_loss = float('inf') # Max Float
saving_files = []
# Timer for computing speed
timer_for_speed = Timer()
timer_for_speed.tic()
INFO('Begin training...')
while True:
summary_writer.add_scalar("Epoch", (eidx + 1), uidx)
# Build iterator and progress bar
training_iter = training_iterator.build_generator()
training_progress_bar = tqdm(desc=' - (Epc {}, Upd {}) '.format(eidx, uidx),
total=len(training_iterator),
unit="sents"
)
for batch in training_iter:
uidx += 1
if optimizer_configs["schedule_method"] is not None and optimizer_configs["schedule_method"] != "loss":
scheduler.step(global_step=uidx)
seqs_x = batch
n_samples_t = len(seqs_x)
n_words_t = sum(len(s) for s in seqs_x)
cum_samples += n_samples_t
cum_words += n_words_t
train_loss = 0.
optim.zero_grad()
try:
# Prepare data
for seqs_x_t, in split_shard(seqs_x, split_size=training_configs['update_cycle']):
x = prepare_data(seqs_x_t, cuda=GlobalNames.USE_GPU)
loss = compute_forward(model=nmt_model,
critic=critic,
seqs_x=x,
eval=False,
normalization=n_samples_t,
norm_by_words=training_configs["norm_by_words"])
train_loss += loss / x.size(1)
optim.step()
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory, skipping batch')
oom_count += 1
optim.zero_grad()
else:
raise e
if ma is not None and eidx >= training_configs['moving_average_start_epoch']:
ma.step()
training_progress_bar.update(n_samples_t)
training_progress_bar.set_description(' - (Epc {}, Upd {}) '.format(eidx, uidx))
training_progress_bar.set_postfix_str(
'TrainLoss: {:.2f}, ValidLoss(best): {:.2f} ({:.2f})'.format(train_loss, valid_loss, best_valid_loss))
summary_writer.add_scalar("train_loss", scalar_value=train_loss, global_step=uidx)
# ================================================================================== #
# Display some information
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['disp_freq']):
# words per second and sents per second
words_per_sec = cum_words / (timer.toc(return_seconds=True))
sents_per_sec = cum_samples / (timer.toc(return_seconds=True))
lrate = list(optim.get_lrate())[0]
summary_writer.add_scalar("Speed(words/sec)", scalar_value=words_per_sec, global_step=uidx)
summary_writer.add_scalar("Speed(sents/sen)", scalar_value=sents_per_sec, global_step=uidx)
summary_writer.add_scalar("lrate", scalar_value=lrate, global_step=uidx)
summary_writer.add_scalar("oom_count", scalar_value=oom_count, global_step=uidx)
# Reset timer
timer.tic()
cum_words = 0
cum_samples = 0
# ================================================================================== #
# Loss Validation & Learning rate annealing
if should_trigger_by_steps(global_step=uidx, n_epoch=eidx, every_n_step=training_configs['loss_valid_freq'],
debug=FLAGS.debug):
if ma is not None:
origin_state_dict = deepcopy(nmt_model.state_dict())
nmt_model.load_state_dict(ma.export_ma_params(), strict=False)
valid_loss = loss_validation(model=nmt_model,
critic=critic,
valid_iterator=valid_iterator,
)
model_collections.add_to_collection("history_losses", valid_loss)
min_history_loss = np.array(model_collections.get_collection("history_losses")).min()
summary_writer.add_scalar("loss", valid_loss, global_step=uidx)
summary_writer.add_scalar("best_loss", min_history_loss, global_step=uidx)
best_valid_loss = min_history_loss
if ma is not None:
nmt_model.load_state_dict(origin_state_dict)
del origin_state_dict
if optimizer_configs["schedule_method"] == "loss":
scheduler.step(global_step=uidx, metric=best_valid_loss)
                # If model gets a new best valid loss
                if valid_loss <= best_valid_loss:
bad_count = 0
if is_early_stop is False:
# 1. save the best model's parameters
torch.save(nmt_model.state_dict(), best_model_prefix + ".final")
# 2. save the best checkpoint
model_collections.add_to_collection("uidx", uidx)
model_collections.add_to_collection("eidx", eidx)
model_collections.add_to_collection("bad_count", bad_count)
best_model_saver.save(global_step=uidx, metric=valid_loss,
model=nmt_model,
optim=optim,
lr_scheduler=scheduler,
collections=model_collections,
ma=ma)
else:
bad_count += 1
# At least one epoch should be traversed
if bad_count >= training_configs['early_stop_patience'] and eidx > 0:
is_early_stop = True
WARN("Early Stop!")
summary_writer.add_scalar("bad_count", bad_count, uidx)
INFO("{0} Loss: {1:.2f} lrate: {2:6f} patience: {3}".format(
uidx, valid_loss, lrate, bad_count
))
# ================================================================================== #
# Saving checkpoints
if should_trigger_by_steps(uidx, eidx, every_n_step=training_configs['save_freq'], debug=FLAGS.debug):
model_collections.add_to_collection("uidx", uidx)
model_collections.add_to_collection("eidx", eidx)
model_collections.add_to_collection("bad_count", bad_count)
if not is_early_stop:
checkpoint_saver.save(global_step=uidx,
model=nmt_model,
optim=optim,
lr_scheduler=scheduler,
collections=model_collections,
ma=ma)
training_progress_bar.close()
eidx += 1
if eidx > training_configs["max_epochs"]:
break
def nmt_lm_fusion_translate(FLAGS):
GlobalNames.USE_GPU = FLAGS.use_gpu
config_path = os.path.abspath(FLAGS.config_path)
with open(config_path.strip()) as f:
configs = yaml.load(f)
data_configs = configs['data_configs']
nmt_model_configs = configs['nmt_model_configs']
lm_model_configs = configs['lm_model_configs']
timer = Timer()
# ================================================================================== #
# Load Data
INFO('Loading data...')
timer.tic()
# Generate target dictionary
vocab_src = Vocabulary(**data_configs["vocabularies"][0])
vocab_tgt = Vocabulary(**data_configs["vocabularies"][1])
valid_dataset = TextLineDataset(data_path=FLAGS.source_path,
vocabulary=vocab_src)
valid_iterator = DataIterator(dataset=valid_dataset,
batch_size=FLAGS.batch_size,
use_bucket=True, buffer_size=100000, numbering=True)
INFO('Done. Elapsed time {0}'.format(timer.toc()))
# ================================================================================== #
# Build Model & Sampler & Validation
INFO('Building model...')
timer.tic()
nmt_model_path = FLAGS.nmt_model_path
lm_model_path = FLAGS.lm_model_path
nmt_model = build_model(n_src_vocab=vocab_src.max_n_words,
n_tgt_vocab=vocab_tgt.max_n_words, **nmt_model_configs)
lm_model = build_model(n_words=vocab_tgt.max_n_words, **lm_model_configs)
nmt_model.eval()
lm_model.eval()
INFO('Done. Elapsed time {0}'.format(timer.toc()))
INFO('Reloading model parameters...')
timer.tic()
nmt_params = load_model_parameters(nmt_model_path, map_location="cpu")
lm_params = load_model_parameters(lm_model_path, map_location="cpu")
nmt_model.load_state_dict(nmt_params)
lm_model.load_state_dict(lm_params)
if GlobalNames.USE_GPU:
nmt_model.cuda()
lm_model.cuda()
INFO('Done. Elapsed time {0}'.format(timer.toc()))
INFO('Begin...')
result_numbers = []
result = []
n_words = 0
timer.tic()
infer_progress_bar = tqdm(total=len(valid_iterator),
desc=' - (Infer) ',
unit="sents")
valid_iter = valid_iterator.build_generator()
for batch in valid_iter:
numbers, seqs_x = batch
batch_size_t = len(seqs_x)
x = prepare_data(seqs_x=seqs_x, cuda=GlobalNames.USE_GPU)
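# Shallow NMT/LM fusion during beam search; the exact scoring lives inside
# nmt_lm_fusion_beam_search, but presumably combines per-step scores roughly as
# log p_nmt(y|x) + alpha * log p_lm(y), with beta acting as a second
# fusion/length hyperparameter.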
with torch.no_grad():
word_ids = nmt_lm_fusion_beam_search(nmt_model=nmt_model, lm_model=lm_model,
beam_size=FLAGS.beam_size,
max_steps=FLAGS.max_steps,
src_seqs=x,
alpha=FLAGS.alpha,
beta=FLAGS.beta)
word_ids = word_ids.cpu().numpy().tolist()
result_numbers += numbers
# Append result
for sent_t in word_ids:
sent_t = [[wid for wid in line if wid != PAD] for line in sent_t]
result.append(sent_t)
n_words += len(sent_t[0])
infer_progress_bar.update(batch_size_t)
infer_progress_bar.close()
INFO('Done. Speed: {0:.2f} words/sec'.format(n_words / (timer.toc(return_seconds=True))))
translation = []
for sent in result:
samples = []
for trans in sent:
sample = []
for w in trans:
if w == vocab_tgt.EOS:
break
sample.append(vocab_tgt.id2token(w))
samples.append(vocab_tgt.tokenizer.detokenize(sample))
translation.append(samples)
# restore the original ordering
origin_order = np.argsort(result_numbers).tolist()
translation = [translation[ii] for ii in origin_order]
with open(FLAGS.saveto, 'w') as f:
for trans in translation:
f.write("%s\n" % trans[0])
if __name__ == '__main__':
_args = {
"model_name": "test_rnnlm",
"reload": False,
"config_path": "./configs/test_rnnlm.yaml",
"debug": True,
"use_gpu": False,
"task": "lm",
"log_path": "/tmp",
"saveto": "/tmp",
"valid_path": "/tmp",
}
from src.bin import train as _train
_train.run(**_args) | [
"torch.cuda.manual_seed_all",
"torch.autograd.backward",
"torch.enable_grad",
"torch.no_grad",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.tensor",
"torch.load"
] | 0.4.0 | skysky77/MGNMT | 19dded399a310cd118eee09bd37d657746d11cf1 |
1.4 | import sys
sys.path.append('../')
import torch
import numpy as np
import random
import math
import time
import argparse
from data_tlp_cite import DataHelper_t
from torch.utils.data import DataLoader
from model import Model
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
FType = torch.FloatTensor
LType = torch.LongTensor
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
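# Reproducibility note: setup_seed pins the Python, NumPy, CPU-torch,
# current-GPU and all-GPU CUDA RNGs and forces deterministic cuDNN kernels;
# cudnn.benchmark is left at its default (False), which determinism also requires.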
def main(args):
setup_seed(args.seed)
Data = DataHelper_t(args.file_path, args.node_feature_path, args.neg_size, args.hist_len, args.directed,
tlp_flag=args.tlp_flag)
loader = DataLoader(Data, batch_size=args.batch_size, shuffle=False, num_workers=5)
model = Model(args).to(device)
model.load_state_dict(torch.load('../res/cite/model.pkl', map_location=device))  # map_location keeps CPU-only runs working
s_emb_list = []
t_emb_list = []
dup_s_emb_list = []
neg_embs_list = []
loss_list = []
model.eval()
for i_batch, sample_batched in enumerate(loader):
loss, s_emb, t_emb, dup_s_emb, neg_embs = model.forward(
sample_batched['s_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['s_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['s_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['t_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_self_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_one_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['neg_two_hop_feat'].type(FType).reshape(-1, args.feat_dim).to(device),
sample_batched['event_time'].type(FType).to(device),
sample_batched['s_history_times'].type(FType).to(device),
sample_batched['s_his_his_times_list'].type(FType).to(device),
sample_batched['t_history_times'].type(FType).to(device),
sample_batched['t_his_his_times_list'].type(FType).to(device),
sample_batched['neg_his_times_list'].type(FType).to(device),
sample_batched['neg_his_his_times_list'].type(FType).to(device),
sample_batched['s_edge_rate'].type(FType).to(device),
training=False
)
s_emb_list.append(s_emb)
t_emb_list.append(t_emb)
dup_s_emb_list.append(dup_s_emb.reshape(-1, args.out_dim))
neg_embs_list.append(neg_embs.reshape(-1, args.out_dim))
loss_list.append(loss)
s_emb_list = torch.cat(s_emb_list, dim=0)
t_emb_list = torch.cat(t_emb_list, dim=0)
dup_s_emb_list = torch.cat(dup_s_emb_list, dim=0)
neg_embs_list = torch.cat(neg_embs_list, dim=0)
truth = torch.ones(s_emb_list.size(0), dtype=torch.int)
truth_neg = torch.zeros(neg_embs_list.size(0), dtype=torch.int)
s_list = torch.cat((s_emb_list, dup_s_emb_list), dim=0)
t_list = torch.cat((t_emb_list, neg_embs_list), dim=0)
truth_list = torch.cat((truth, truth_neg), dim=0)
dif_list = torch.abs(s_list - t_list).detach().cpu()  # detach from the graph so sklearn can convert to numpy
x_train, x_test, y_train, y_test = train_test_split(dif_list, truth_list, test_size=1 - args.train_ratio,
random_state=args.seed, stratify=truth_list)
lr = LogisticRegression(max_iter=10000)
lr.fit(x_train, y_train)
y_test_pred = lr.predict(x_test)
acc = accuracy_score(y_test, y_test_pred)
f1 = f1_score(y_test, y_test_pred)
print('acc:{}'.format(round(acc, 4)))
print('f1:{}'.format(round(f1, 4)))
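# roc_auc_score is imported above but never used; a minimal sketch of how AUC
# could be reported alongside accuracy and F1 (LogisticRegression exposes
# predict_proba by default):
# y_test_prob = lr.predict_proba(x_test)[:, 1]
# print('auc:{}'.format(round(roc_auc_score(y_test, y_test_prob), 4)))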
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', type=str, default='./data/cite/emb_edges.pt')
parser.add_argument('--node_feature_path', type=str, default='./data/cite/sorted_emb_feat.pt')
parser.add_argument('--neg_size', type=int, default=1)
parser.add_argument('--hist_len', type=int, default=10)
# note: argparse's type=bool treats any non-empty string (including 'False') as
# True, so --directed and --tlp_flag are only reliable via their defaults
parser.add_argument('--directed', type=bool, default=False)
parser.add_argument('--epoch_num', type=int, default=10, help='epoch number')
parser.add_argument('--tlp_flag', type=bool, default=True)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--hid_dim', type=int, default=16)
parser.add_argument('--feat_dim', type=int, default=128)
parser.add_argument('--out_dim', type=int, default=16)
parser.add_argument('--seed', type=int, default=4)
parser.add_argument('--ncoef', type=float, default=0.01)
parser.add_argument('--l2_reg', type=float, default=0.001)
parser.add_argument('--train_ratio', type=float, default=0.8)
args = parser.parse_args()
start = time.perf_counter()
main(args) | [
"torch.cat",
"torch.cuda.manual_seed",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.abs",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.4.0 | WenZhihao666/TREND | ca4b17139b5f24d44d9421fed92021eb7a95ed6d |
1.6 | # -*- coding: utf-8 -*-
# @Time : 2020/10/3
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGNNLS
################################################
Reference:
Hongwei Wang et al. "Knowledge-aware Graph Neural Networks with Label Smoothness Regularization
for Recommender Systems." in KDD 2019.
Reference code:
https://github.com/hwwang55/KGNN-LS
"""
import torch
import torch.nn as nn
import numpy as np
import random
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGNNLS(KnowledgeRecommender):
r"""KGNN-LS is a knowledge-based recommendation model.
KGNN-LS transforms the knowledge graph into a user-specific weighted graph and then applies a graph neural network to
compute personalized item embeddings. To provide a better inductive bias, KGNN-LS relies on the label smoothness
assumption, which posits that adjacent items in the knowledge graph are likely to have similar user relevance
labels/scores. Label smoothness provides regularization over the edge weights and it is equivalent to a label
propagation scheme on a graph.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGNNLS, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
self.neighbor_sample_size = config['neighbor_sample_size']
self.aggregator_class = config['aggregator'] # which aggregator to use
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.reg_weight = config['reg_weight'] # weight of l2 regularization
# weight of label Smoothness regularization
self.ls_weight = config['ls_weight']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors and construct interaction table
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
inter_feat = dataset.dataset.inter_feat.values
pos_users = torch.from_numpy(inter_feat[:, 0])
pos_items = torch.from_numpy(inter_feat[:, 1])
pos_label = torch.ones(pos_items.shape)
pos_interaction_table, self.offset = self.get_interaction_table(
pos_users, pos_items, pos_label)
self.interaction_table = self.sample_neg_interaction(
pos_interaction_table, self.offset)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def get_interaction_table(self, user_id, item_id, y):
r"""Get interaction_table that is used for fetching user-item interaction label in LS regularization.
Args:
user_id(torch.Tensor): the user id in user-item interactions, shape: [n_interactions, 1]
item_id(torch.Tensor): the item id in user-item interactions, shape: [n_interactions, 1]
y(torch.Tensor): the label in user-item interactions, shape: [n_interactions, 1]
Returns:
tuple:
- interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
- offset(int): The offset that is used for calculating the key(index) in interaction_table
"""
offset = len(str(self.n_entities))
offset = 10 ** offset
keys = user_id * offset + item_id
keys = keys.int().cpu().numpy().tolist()
values = y.float().cpu().numpy().tolist()
interaction_table = dict(zip(keys, values))
return interaction_table, offset
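# Worked example of the key encoding above (illustrative values): with
# n_entities = 24915, offset = 10 ** 5 = 100000, so the interaction of user 7
# with item 42 is stored under key 7 * 100000 + 42 = 700042; since the offset
# exceeds every entity id, the (user, item) pair is recoverable from the key.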
def sample_neg_interaction(self, pos_interaction_table, offset):
r"""Sample neg_interaction to construct train data.
Args:
pos_interaction_table(dict): the interaction_table that only contains pos_interaction.
offset(int): The offset that is used for calculating the key(index) in interaction_table
Returns:
interaction_table(dict): key: user_id * 10^offset + item_id; value: y_{user_id, item_id}
"""
pos_num = len(pos_interaction_table)
neg_num = 0
neg_interaction_table = {}
while neg_num < pos_num:
user_id = random.randint(0, self.n_users)
item_id = random.randint(0, self.n_items)
keys = user_id * offset + item_id
if keys not in pos_interaction_table:
neg_interaction_table[keys] = 0.
neg_num += 1
interaction_table = {**pos_interaction_table, **neg_interaction_table}
return interaction_table
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity (torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation (torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
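# Note on the sampling above: every entity ends up with exactly
# neighbor_sample_size neighbors -- drawn without replacement when it has
# enough, with replacement otherwise, and a self-loop with relation 0 when it
# is isolated -- so the adjacency tensors have a fixed
# [n_entities, neighbor_sample_size] shape suitable for batched index_select.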
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
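# Shape example (illustrative): with batch_size = 2, neighbor_sample_size = 4
# and n_iter = 2, entities holds tensors of shape [2, 1], [2, 4], [2, 16] and
# relations holds [2, 4], [2, 16] -- each hop multiplies the width by the
# number of sampled neighbors.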
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
# mix_neighbor_vectors
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(self.softmax(user_relation_scores),
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_agg = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
res = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return res
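# Aggregator recap (matching the branches above): 'sum' feeds v + n, 'neighbor'
# feeds n alone, and 'concat' feeds [v; n] through the hop's linear layer,
# where v is the entity vector and n the user-weighted neighborhood mean;
# intermediate hops use ReLU and the final hop uses Tanh.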
def label_smoothness_predict(self, user_embeddings, user, entities, relations):
r"""Predict the label of items by label smoothness.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size*2, embedding_size],
user(torch.FloatTensor): the index of users, shape: [batch_size*2]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size*2, 1],
[batch_size*2, n_neighbor],
[batch_size*2, n_neighbor^2],
...,
[batch_size*2, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
predicted_labels(torch.FloatTensor): The predicted label of items, shape: [batch_size*2]
"""
# calculate initial labels; calculate updating masks for label propagation
entity_labels = []
# True means the label of this item is reset to initial value during label propagation
reset_masks = []
holdout_item_for_user = None
for entities_per_iter in entities:
users = torch.unsqueeze(user, dim=1) # [batch_size, 1]
user_entity_concat = users * self.offset + \
entities_per_iter # [batch_size, n_neighbor^i]
# the first one in entities is the items to be held out
if holdout_item_for_user is None:
holdout_item_for_user = user_entity_concat
def lookup_interaction_table(x, _):
x = int(x)
label = self.interaction_table.setdefault(x, 0.5)
return label
initial_label = user_entity_concat.clone().cpu().double()
initial_label.map_(initial_label, lookup_interaction_table)
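# map_ applies lookup_interaction_table elementwise on CPU, replacing each
# user * offset + entity key with its observed label (1.0 / 0.0) or the 0.5
# prior for unobserved pairs.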
initial_label = initial_label.float().to(self.device)
# False if the item is held out
holdout_mask = (holdout_item_for_user - user_entity_concat).bool()
# True if the entity is a labeled item
reset_mask = (initial_label - 0.5).bool()
reset_mask = torch.logical_and(
reset_mask, holdout_mask) # remove held-out items
initial_label = holdout_mask.float() * initial_label + torch.logical_not(
holdout_mask).float() * 0.5 # label initialization
reset_masks.append(reset_mask)
entity_labels.append(initial_label)
# we do not need the reset_mask for the last iteration
reset_masks = reset_masks[:-1]
# label propagation
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_labels_next_iter = []
for hop in range(self.n_iter - i):
masks = reset_masks[hop]
self_labels = entity_labels[hop]
neighbor_labels = torch.reshape(entity_labels[hop + 1],
[self.batch_size, -1, self.neighbor_sample_size])
neighbor_relations = torch.reshape(relation_vectors[hop],
[self.batch_size, -1, self.neighbor_sample_size,
self.embedding_size])
# mix_neighbor_labels
user_embeddings = torch.reshape(user_embeddings,
[self.batch_size, 1, 1, self.embedding_size]) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
neighbors_aggregated_label = torch.mean(user_relation_scores_normalized * neighbor_labels,
dim=2) # [batch_size, -1, dim] # [batch_size, -1]
output = masks.float() * self_labels + torch.logical_not(masks).float() * \
neighbors_aggregated_label
entity_labels_next_iter.append(output)
entity_labels = entity_labels_next_iter
predicted_labels = entity_labels[0].squeeze(-1)
return predicted_labels
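# Label-propagation recap (what the loop above computes): at each hop an
# entity's label becomes the user-relation-weighted mean of its neighbors'
# labels, except where reset_masks is True, in which case the observed label
# is written back -- the usual "clamp the labeled nodes" step, with the
# held-out item excluded so that its label must be predicted.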
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_ls_loss(self, user, item, target):
r"""Calculate label smoothness loss.
Args:
user(torch.FloatTensor): the index of users, shape: [batch_size*2],
item(torch.FloatTensor): the index of items, shape: [batch_size*2],
target(torch.FloatTensor): the label of user-item, shape: [batch_size*2],
Returns:
ls_loss: label smoothness loss
"""
user_e = self.user_embedding(user)
entities, relations = self.get_neighbors(item)
predicted_labels = self.label_smoothness_predict(
user_e, user, entities, relations)
ls_loss = self.bce_loss(predicted_labels, target)
return ls_loss
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
users = torch.cat((user, user))
items = torch.cat((pos_item, neg_item))
user_e, item_e = self.forward(users, items)
predict = torch.mul(user_e, item_e).sum(dim=1)
rec_loss = self.bce_loss(predict, target)
ls_loss = self.calculate_ls_loss(users, items, target)
l2_loss = self.l2_loss(user_e, item_e)
loss = rec_loss + self.ls_weight * ls_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.ones",
"torch.logical_and",
"torch.nn.BCEWithLogitsLoss",
"torch.reshape",
"torch.mul",
"torch.nn.Softmax",
"torch.unsqueeze",
"torch.logical_not",
"torch.index_select",
"torch.nn.Embedding",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.from_numpy",
"torch.flatten",
"torch.mean"
] | 1.6.0 | xingkongxiaxia/xx | ce51d75406592d6bc25bb803f773f0788496fd97 |
1.6 | # -*- coding: utf-8 -*-
# @Time : 2020/10/6
# @Author : Changxin Tian
# @Email : [email protected]
r"""
KGCN
################################################
Reference:
Hongwei Wang et al. "Knowledge graph convolution networks for recommender systems." in WWW 2019.
Reference code:
https://github.com/hwwang55/KGCN
"""
import torch
import torch.nn as nn
import numpy as np
from recbole.utils import InputType
from recbole.model.abstract_recommender import KnowledgeRecommender
from recbole.model.loss import BPRLoss, EmbLoss
from recbole.model.init import xavier_normal_initialization
class KGCN(KnowledgeRecommender):
r"""KGCN is a knowledge-based recommendation model that captures inter-item relatedness effectively by mining their
associated attributes on the KG. To automatically discover both high-order structure information and semantic
information of the KG, we treat KG as an undirected graph and sample from the neighbors for each entity in the KG
as their receptive field, then combine neighborhood information with bias when calculating the representation of a
given entity.
"""
input_type = InputType.PAIRWISE
def __init__(self, config, dataset):
super(KGCN, self).__init__(config, dataset)
# load parameters info
self.embedding_size = config['embedding_size']
# number of iterations when computing entity representation
self.n_iter = config['n_iter']
self.aggregator_class = config['aggregator'] # which aggregator to use
self.reg_weight = config['reg_weight'] # weight of l2 regularization
self.neighbor_sample_size = config['neighbor_sample_size']
# define embedding
self.user_embedding = nn.Embedding(self.n_users, self.embedding_size)
self.entity_embedding = nn.Embedding(
self.n_entities, self.embedding_size)
self.relation_embedding = nn.Embedding(
self.n_relations + 1, self.embedding_size)
# sample neighbors
kg_graph = dataset.kg_graph(form='coo', value_field='relation_id')
adj_entity, adj_relation = self.construct_adj(kg_graph)
self.adj_entity, self.adj_relation = adj_entity.to(
self.device), adj_relation.to(self.device)
# define function
self.softmax = nn.Softmax(dim=-1)
self.linear_layers = torch.nn.ModuleList()
for i in range(self.n_iter):
self.linear_layers.append(nn.Linear(
self.embedding_size if not self.aggregator_class == 'concat' else self.embedding_size * 2,
self.embedding_size))
self.ReLU = nn.ReLU()
self.Tanh = nn.Tanh()
self.bce_loss = nn.BCEWithLogitsLoss()
self.l2_loss = EmbLoss()
# parameters initialization
self.apply(xavier_normal_initialization)
def construct_adj(self, kg_graph):
r"""Get neighbors and corresponding relations for each entity in the KG.
Args:
kg_graph(scipy.sparse.coo_matrix): an undirected graph
Returns:
tuple:
- adj_entity(torch.LongTensor): each line stores the sampled neighbor entities for a given entity,
shape: [n_entities, neighbor_sample_size]
- adj_relation(torch.LongTensor): each line stores the corresponding sampled neighbor relations,
shape: [n_entities, neighbor_sample_size]
"""
# self.logger.info('constructing knowledge graph ...')
# treat the KG as an undirected graph
kg_dict = dict()
for triple in zip(kg_graph.row, kg_graph.data, kg_graph.col):
head = triple[0]
relation = triple[1]
tail = triple[2]
if head not in kg_dict:
kg_dict[head] = []
kg_dict[head].append((tail, relation))
if tail not in kg_dict:
kg_dict[tail] = []
kg_dict[tail].append((head, relation))
# self.logger.info('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
entity_num = kg_graph.shape[0]
adj_entity = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
adj_relation = np.zeros(
[entity_num, self.neighbor_sample_size], dtype=np.int64)
for entity in range(entity_num):
if entity not in kg_dict.keys():
adj_entity[entity] = np.array(
[entity] * self.neighbor_sample_size)
adj_relation[entity] = np.array(
[0] * self.neighbor_sample_size)
continue
neighbors = kg_dict[entity]
n_neighbors = len(neighbors)
if n_neighbors >= self.neighbor_sample_size:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=False)
else:
sampled_indices = np.random.choice(list(range(n_neighbors)), size=self.neighbor_sample_size,
replace=True)
adj_entity[entity] = np.array(
[neighbors[i][0] for i in sampled_indices])
adj_relation[entity] = np.array(
[neighbors[i][1] for i in sampled_indices])
return torch.from_numpy(adj_entity), torch.from_numpy(adj_relation)
def get_neighbors(self, items):
r"""Get neighbors and corresponding relations for each entity in items from adj_entity and adj_relation.
Args:
items(torch.LongTensor): The input tensor that contains item's id, shape: [batch_size, ]
Returns:
tuple:
- entities(list): Entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
- relations(list): Relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for
entities. Relations have the same shape as entities.
"""
items = torch.unsqueeze(items, dim=1)
entities = [items]
relations = []
for i in range(self.n_iter):
index = torch.flatten(entities[i])
neighbor_entities = torch.reshape(torch.index_select(
self.adj_entity, 0, index), (self.batch_size, -1))
neighbor_relations = torch.reshape(torch.index_select(
self.adj_relation, 0, index), (self.batch_size, -1))
entities.append(neighbor_entities)
relations.append(neighbor_relations)
return entities, relations
def mix_neighbor_vectors(self, neighbor_vectors, neighbor_relations, user_embeddings):
r"""Mix neighbor vectors on user-specific graph.
Args:
neighbor_vectors(torch.FloatTensor): The embeddings of neighbor entities(items),
shape: [batch_size, -1, neighbor_sample_size, embedding_size]
neighbor_relations(torch.FloatTensor): The embeddings of neighbor relations,
shape: [batch_size, -1, neighbor_sample_size, embedding_size]
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
Returns:
neighbors_aggregated(torch.FloatTensor): The neighbors aggregated embeddings,
shape: [batch_size, -1, embedding_size]
"""
avg = False
if not avg:
user_embeddings = torch.reshape(user_embeddings,
(self.batch_size, 1, 1, self.embedding_size)) # [batch_size, 1, 1, dim]
user_relation_scores = torch.mean(user_embeddings * neighbor_relations,
dim=-1) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = self.softmax(
user_relation_scores) # [batch_size, -1, n_neighbor]
user_relation_scores_normalized = torch.unsqueeze(user_relation_scores_normalized,
dim=-1) # [batch_size, -1, n_neighbor, 1]
neighbors_aggregated = torch.mean(user_relation_scores_normalized * neighbor_vectors,
dim=2) # [batch_size, -1, dim]
else:
neighbors_aggregated = torch.mean(
neighbor_vectors, dim=2) # [batch_size, -1, dim]
return neighbors_aggregated
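# In formula form (the non-avg branch above): with user embedding u, relation
# embeddings r_i and neighbor embeddings v_i,
#   pi_i = softmax_i(mean(u * r_i))   # user-specific relation score
#   agg  = mean_i(pi_i * v_i)         # user-personalized neighborhood
# so the same neighbors are weighted differently for different users.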
def aggregate(self, user_embeddings, entities, relations):
r"""For each item, aggregate the entity representation and its neighborhood representation into a single vector.
Args:
user_embeddings(torch.FloatTensor): The embeddings of users, shape: [batch_size, embedding_size]
entities(list): entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items.
dimensions of entities: {[batch_size, 1],
[batch_size, n_neighbor],
[batch_size, n_neighbor^2],
...,
[batch_size, n_neighbor^n_iter]}
relations(list): relations is a list of i-iter (i = 0, 1, ..., n_iter) corresponding relations for entities.
relations have the same shape as entities.
Returns:
item_embeddings(torch.FloatTensor): The embeddings of items, shape: [batch_size, embedding_size]
"""
entity_vectors = [self.entity_embedding(i) for i in entities]
relation_vectors = [self.relation_embedding(i) for i in relations]
for i in range(self.n_iter):
entity_vectors_next_iter = []
for hop in range(self.n_iter - i):
shape = (self.batch_size, -1,
self.neighbor_sample_size, self.embedding_size)
self_vectors = entity_vectors[hop]
neighbor_vectors = torch.reshape(
entity_vectors[hop + 1], shape)
neighbor_relations = torch.reshape(
relation_vectors[hop], shape)
neighbors_agg = self.mix_neighbor_vectors(neighbor_vectors, neighbor_relations,
user_embeddings) # [batch_size, -1, dim]
if self.aggregator_class == 'sum':
output = torch.reshape(
self_vectors + neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'neighbor':
output = torch.reshape(
neighbors_agg, (-1, self.embedding_size)) # [-1, dim]
elif self.aggregator_class == 'concat':
# [batch_size, -1, dim * 2]
output = torch.cat([self_vectors, neighbors_agg], dim=-1)
output = torch.reshape(
output, (-1, self.embedding_size * 2)) # [-1, dim * 2]
else:
raise Exception("Unknown aggregator: " +
self.aggregator_class)
output = self.linear_layers[i](output)
# [batch_size, -1, dim]
output = torch.reshape(
output, [self.batch_size, -1, self.embedding_size])
if i == self.n_iter - 1:
vector = self.Tanh(output)
else:
vector = self.ReLU(output)
entity_vectors_next_iter.append(vector)
entity_vectors = entity_vectors_next_iter
item_embeddings = torch.reshape(
entity_vectors[0], (self.batch_size, self.embedding_size))
return item_embeddings
def forward(self, user, item):
self.batch_size = item.shape[0]
# [batch_size, dim]
user_e = self.user_embedding(user)
# entities is a list of i-iter (i = 0, 1, ..., n_iter) neighbors for the batch of items. dimensions of entities:
# {[batch_size, 1], [batch_size, n_neighbor], [batch_size, n_neighbor^2], ..., [batch_size, n_neighbor^n_iter]}
entities, relations = self.get_neighbors(item)
# [batch_size, dim]
item_e = self.aggregate(user_e, entities, relations)
return user_e, item_e
def calculate_loss(self, interaction):
user = interaction[self.USER_ID]
pos_item = interaction[self.ITEM_ID]
neg_item = interaction[self.NEG_ITEM_ID]
user_e, pos_item_e = self.forward(user, pos_item)
user_e, neg_item_e = self.forward(user, neg_item)
pos_item_score = torch.mul(user_e, pos_item_e).sum(dim=1)
neg_item_score = torch.mul(user_e, neg_item_e).sum(dim=1)
predict = torch.cat((pos_item_score, neg_item_score))
target = torch.zeros(
len(user) * 2, dtype=torch.float32).to(self.device)
target[:len(user)] = 1
rec_loss = self.bce_loss(predict, target)
l2_loss = self.l2_loss(user_e, pos_item_e, neg_item_e)
loss = rec_loss + self.reg_weight * l2_loss
return loss
def predict(self, interaction):
user = interaction[self.USER_ID]
item = interaction[self.ITEM_ID]
user_e, item_e = self.forward(user, item)
return torch.mul(user_e, item_e).sum(dim=1)
def full_sort_predict(self, interaction):
user_index = interaction[self.USER_ID]
item_index = torch.tensor(range(self.n_items)).to(self.device)
user = torch.unsqueeze(user_index, dim=1).repeat(
1, item_index.shape[0])
user = torch.flatten(user)
item = torch.unsqueeze(item_index, dim=0).repeat(
user_index.shape[0], 1)
item = torch.flatten(item)
user_e, item_e = self.forward(user, item)
score = torch.mul(user_e, item_e).sum(dim=1)
return score.view(-1)
| [
"torch.nn.Linear",
"torch.index_select",
"torch.cat",
"torch.mul",
"torch.nn.ModuleList",
"torch.nn.Softmax",
"torch.nn.Tanh",
"torch.unsqueeze",
"torch.nn.ReLU",
"torch.from_numpy",
"torch.mean",
"torch.nn.BCEWithLogitsLoss",
"torch.flatten",
"torch.nn.Embedding",
"torch.reshape"
] | 1.6.0 | xingkongxiaxia/xx | ce51d75406592d6bc25bb803f773f0788496fd97 |
0.4 | import os
import click
import numpy as np
from tqdm import tqdm
from models.model_loader import load_model
from torchvision.transforms import Compose
from dataset.data_transform import Resize, Rotation, ElasticAndSine, ColorGradGausNoise, AddWidth, Normalize, ToGray, OnlyElastic, OnlySine, ColorGrad, ColorGausNoise
from dataset.text_data import TextDataset, TextDatasetRandomFont
from dataset.collate_fn import text_collate
from utils.data_visualization import TbSummary
from lr_policy import StepLR, DannLR
import pickle as pkl
import glob
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from warpctc_pytorch import CTCLoss
from test import test
from models.new_vat import VATLoss, VATLossSign, LabeledATLoss, LabeledAtAndUnlabeledTestVatLoss, VATonRnnSign, VATonRnnCnnSign, VATonCnnSign
from dataset.dataset_metadata import SynthDataInfo
@click.command()
@click.option('--base-data-dir', type=str,
default=os.path.expandvars('../Data/'),
help='Path to base data directory (all other data paths are relative to this one).')
@click.option('--train-data-path', type=str,
default=os.path.expandvars('Synthetic/Prepared/data_train.txt'),
help='Path to training dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--train-base-dir', type=str,
default=os.path.expandvars(
'Synthetic/Prepared/Images'),
help='Path to directory containing training images (relative to base-data-dir)')
@click.option('--orig-eval-data-path', type=str,
default=os.path.expandvars(
'Test/Prepared/im2line.txt'),
help='Path to original test dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--orig-eval-base-dir', type=str,
default=os.path.expandvars(
'Test/Prepared/LineImages'),
help='Path to directory containing original test images (relative to base-data-dir)')
@click.option('--synth-eval-data-path', type=str,
default=os.path.expandvars('Synthetic/Prepared/data_val.txt'),
help='Path to synthetic evaluation dataset (image path to line text) text file (relative to base-data-dir)')
@click.option('--synth-eval-base-dir', type=str,
default=os.path.expandvars(
'Synthetic/Prepared/Images'),
help='Path to directory containing synthetic evaluation images (relative to base-data-dir)')
@click.option('--lexicon-path', type=str,
default=os.path.expandvars('char_to_class.pkl'),
help='Path to alphabet lexicon (letter to id), relative to base-data-dir.')
@click.option('--seq-proj', type=str, default="10x20", help='Projection of sequence')
@click.option('--backend', type=str, default="resnet18", help='Backend network to use (default is resnet18)')
@click.option('--snapshot', type=str, default=None, help='Path to pre-trained weights')
@click.option('--input-height', type=int, default=64, help='Height of input images to network')
@click.option('--base-lr', type=float, default=1e-4, help='Base learning rate.') # was e-3
#@click.option('--lr-decay', type=float, default=1e-4, help='Base learning rate') # was 0.0001
@click.option('--elastic-alpha', type=float, default=34, help='Elastic augmentation parameter alpha.')
@click.option('--elastic-sigma', type=float, default=3, help='Elastic augmentation parameter sigma.')
@click.option('--step-size', type=int, default=500, help='Step size for step lr change.')
@click.option('--max-iter', type=int, default=6000, help='Max iterations for training')
@click.option('--batch-size', type=int, default=8, help='Batch size for training')
@click.option('--output-dir', type=str,
default='../Output/exp1',
help='Path to save output snapshot')
@click.option('--test-iter', type=int, default=1000, help='Number of iterations between test evaluation.')
@click.option('--show-iter', type=int, default=1000, help='Number of iterations between showing images in tensorboard.')
@click.option('--test-init', type=bool, default=False, help='Whether to test right after network initialization')
@click.option('--use-gpu', type=bool, default=True, help='Whether to use the gpu')
@click.option('--use-no-font-repeat-data', type=bool, default=True, help='Deprecated (always True): whether to use the random-font training dataset.')
@click.option('--do-vat', type=bool, default=False, help='Whether to do VAT on synthetic training data')
@click.option('--do-at', type=bool, default=False, help='Whether to do AT on synthetic training data')
@click.option('--vat-ratio', type=float, default=1, help='Weight of the train-data VAT loss relative to the base loss')
@click.option('--test-vat-ratio', type=float, default=1, help='Weight of the test-data VAT loss relative to the base loss')
@click.option('--vat-epsilon', type=float, default=2.5, help='VAT on train hyperparameter - epsilon')
@click.option('--vat-ip', type=int, default=1, help='VAT on train hyperparameter - number of power iterations')
@click.option('--vat-xi', type=float, default=10., help='VAT on train hyperparameter - xi')
@click.option('--vat-sign', type=bool, default=False, help='VAT on train hyperparameter - whether to do sign on vat loss')
@click.option('--do-remove-augs', type=bool, default=False, help='Whether to remove some of the augmentations (for ablation study)')
@click.option('--aug-to-remove', type=str,
default='',
help="with augmentation to remover out of ['elastic', 'sine', 'sine_rotate', 'rotation', 'color_aug', 'color_gaus', 'color_sine']")
@click.option('--do-beam-search', type=bool, default=False, help='whether to do beam search inference in evaluation')
@click.option('--dropout-conv', type=bool, default=False, help='Whether to do dropout between convolution and rnn.')
@click.option('--dropout-rnn', type=bool, default=False, help='Whether to do dropout in rnn.')
@click.option('--dropout-output', type=bool, default=False, help='Whether to do dropout after rnn.')
@click.option('--do-ema', type=bool, default=False, help='Whether to do exponential moving average on weights')
@click.option('--do-gray', type=bool, default=False, help='Whether to use grayscale instead of RGB')
@click.option('--do-test-vat', type=bool, default=False, help='Whether to do VAT loss on original test data')
@click.option('--do-test-entropy', type=bool, default=False, help='Whether to do entropy loss on original test data')
@click.option('--do-test-vat-cnn', type=bool, default=False, help='Whether to do VAT loss on original test data only for cnn part')
@click.option('--do-test-vat-rnn', type=bool, default=False, help='Whether to do VAT loss on original test data only for rnn part')
@click.option('--ada-after-rnn', type=bool, default=False, help='Whether to do adversarial domain adaptation on the rnn part')
@click.option('--ada-before-rnn', type=bool, default=False, help='Whether to do adversarial domain adaptation on the cnn part')
@click.option('--do-ada-lr', type=bool, default=False, help='Whether to use the learning-rate schedule suited for adversarial domain adaptation (from the DANN paper)')
@click.option('--ada-ratio', type=float, default=1, help='Weight of the ADA loss relative to the base loss')
@click.option('--rnn-hidden-size', type=int, default=128, help='Size of rnn hidden layer')
@click.option('--do-lr-step', type=bool, default=False, help='Whether to use step learning-rate decay')
@click.option('--dataset-name', type=str, default='tibetan', help='Dataset name, currently wiener or tibetan')
def main(base_data_dir, train_data_path, train_base_dir,
orig_eval_data_path, orig_eval_base_dir,
synth_eval_data_path, synth_eval_base_dir,
lexicon_path, seq_proj, backend, snapshot, input_height, base_lr, elastic_alpha, elastic_sigma,
step_size, max_iter,
batch_size, output_dir, test_iter, show_iter, test_init, use_gpu, use_no_font_repeat_data,
do_vat, do_at, vat_ratio, test_vat_ratio, vat_epsilon, vat_ip, vat_xi, vat_sign,
do_remove_augs, aug_to_remove, do_beam_search,
dropout_conv, dropout_rnn, dropout_output, do_ema, do_gray, do_test_vat, do_test_entropy, do_test_vat_cnn,
do_test_vat_rnn,
ada_after_rnn, ada_before_rnn, do_ada_lr, ada_ratio, rnn_hidden_size,
do_lr_step,
dataset_name
):
if not do_lr_step and not do_ada_lr:
raise NotImplementedError('learning rate should be either step or ada.')
train_data_path = os.path.join(base_data_dir, train_data_path)
train_base_dir = os.path.join(base_data_dir, train_base_dir)
synth_eval_data_path = os.path.join(base_data_dir, synth_eval_data_path)
synth_eval_base_dir = os.path.join(base_data_dir, synth_eval_base_dir)
orig_eval_data_path = os.path.join(base_data_dir, orig_eval_data_path)
orig_eval_base_dir = os.path.join(base_data_dir, orig_eval_base_dir)
lexicon_path = os.path.join(base_data_dir, lexicon_path)
all_parameters = locals()
cuda = use_gpu
#print(train_base_dir)
if output_dir is not None:
os.makedirs(output_dir, exist_ok=True)
tb_writer = TbSummary(output_dir)
output_dir = os.path.join(output_dir, 'model')
os.makedirs(output_dir, exist_ok=True)
with open(lexicon_path, 'rb') as f:
lexicon = pkl.load(f)
#print(sorted(lexicon.items(), key=operator.itemgetter(1)))
with open(os.path.join(output_dir, 'params.txt'),'w') as f:
f.writelines(str(all_parameters))
print(all_parameters)
print('new vat')
sin_magnitude = 4
rotate_max_angle = 2
dataset_info = SynthDataInfo(None, None, None, dataset_name.lower())
train_fonts = dataset_info.font_names
all_args = locals()
allowed_removals = ['elastic', 'sine', 'sine_rotate', 'rotation', 'color_aug', 'color_gaus', 'color_sine']
if do_remove_augs and aug_to_remove not in allowed_removals:
raise Exception('augmentation removal value is not allowed.')
if do_remove_augs:
rand_trans = []
if aug_to_remove == 'elastic':
print('doing sine transform :)')
rand_trans.append(OnlySine(sin_magnitude=sin_magnitude))
elif aug_to_remove in ['sine', 'sine_rotate']:
print('doing elastic transform :)')
rand_trans.append(OnlyElastic(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma))
if aug_to_remove not in ['elastic', 'sine', 'sine_rotate']:
print('doing elastic transform :)')
print('doing sine transform :)')
rand_trans.append(ElasticAndSine(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma, sin_magnitude=sin_magnitude))
if aug_to_remove not in ['rotation', 'sine_rotate']:
print('doing rotation transform :)')
rand_trans.append(Rotation(angle=rotate_max_angle, fill_value=255))
if aug_to_remove not in ['color_aug', 'color_gaus', 'color_sine']:
print('doing color_aug transform :)')
rand_trans.append(ColorGradGausNoise())
elif aug_to_remove == 'color_gaus':
print('doing color_sine transform :)')
rand_trans.append(ColorGrad())
elif aug_to_remove == 'color_sine':
print('doing color_gaus transform :)')
rand_trans.append(ColorGausNoise())
else:
print('doing all transforms :)')
rand_trans = [
ElasticAndSine(elastic_alpha=elastic_alpha, elastic_sigma=elastic_sigma, sin_magnitude=sin_magnitude),
Rotation(angle=rotate_max_angle, fill_value=255),
ColorGradGausNoise()]
if do_gray:
rand_trans = rand_trans + [Resize(hight=input_height),
AddWidth(),
ToGray(),
Normalize()]
else:
rand_trans = rand_trans + [Resize(hight=input_height),
AddWidth(),
Normalize()]
transform_random = Compose(rand_trans)
if do_gray:
transform_simple = Compose([
Resize(hight=input_height),
AddWidth(),
ToGray(),
Normalize()
])
else:
transform_simple = Compose([
Resize(hight=input_height),
AddWidth(),
Normalize()
])
if use_no_font_repeat_data:
print('creating dataset')
train_data = TextDatasetRandomFont(data_path=train_data_path, lexicon=lexicon,
base_path=train_base_dir, transform=transform_random, fonts=train_fonts)
print('finished creating dataset')
else:
print('train data path:\n{}'.format(train_data_path))
print('train_base_dir:\n{}'.format(train_base_dir))
train_data = TextDataset(data_path=train_data_path, lexicon=lexicon,
base_path=train_base_dir, transform=transform_random, fonts=train_fonts)
synth_eval_data = TextDataset(data_path=synth_eval_data_path, lexicon=lexicon,
base_path=synth_eval_base_dir, transform=transform_random, fonts=train_fonts)
orig_eval_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
orig_vat_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
if ada_after_rnn or ada_before_rnn:
orig_ada_data = TextDataset(data_path=orig_eval_data_path, lexicon=lexicon,
base_path=orig_eval_base_dir, transform=transform_simple, fonts=None)
#else:
# train_data = TestDataset(transform=transform, abc=abc).set_mode("train")
# synth_eval_data = TestDataset(transform=transform, abc=abc).set_mode("test")
# orig_eval_data = TestDataset(transform=transform, abc=abc).set_mode("test")
seq_proj = [int(x) for x in seq_proj.split('x')]
net = load_model(lexicon=train_data.get_lexicon(), seq_proj=seq_proj, backend=backend,
snapshot=snapshot, cuda=cuda, do_beam_search=do_beam_search,
dropout_conv=dropout_conv,
dropout_rnn=dropout_rnn,
dropout_output=dropout_output,
do_ema=do_ema,
ada_after_rnn=ada_after_rnn, ada_before_rnn=ada_before_rnn,
rnn_hidden_size=rnn_hidden_size
)
optimizer = optim.Adam(net.parameters(), lr = base_lr, weight_decay=0.0001)
if do_ada_lr:
print('using ada lr')
lr_scheduler = DannLR(optimizer, max_iter=max_iter)
elif do_lr_step:
print('using step lr')
lr_scheduler = StepLR(optimizer, step_size=step_size, max_iter=max_iter)
loss_function = CTCLoss()
synth_avg_ed_best = float("inf")
orig_avg_ed_best = float("inf")
epoch_count = 0
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
collate_vat = lambda x: text_collate(x, do_mask=True)
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
if ada_after_rnn or ada_before_rnn:
collate_ada = lambda x: text_collate(x, do_mask=True)
ada_load = DataLoader(orig_ada_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate_ada)
ada_len = len(ada_load)
cur_ada = 0
ada_iter = iter(ada_load)
loss_domain = torch.nn.NLLLoss()
while True:
collate = lambda x: text_collate(x, do_mask=(do_vat or ada_before_rnn or ada_after_rnn))
data_loader = DataLoader(train_data, batch_size=batch_size, num_workers=4, shuffle=True, collate_fn=collate)
loss_mean_ctc = []
loss_mean_vat = []
loss_mean_at = []
loss_mean_comp = []
loss_mean_total = []
loss_mean_test_vat = []
loss_mean_test_pseudo = []
loss_mean_test_rand = []
loss_mean_ada_rnn_s = []
loss_mean_ada_rnn_t = []
loss_mean_ada_cnn_s = []
loss_mean_ada_cnn_t = []
iterator = tqdm(data_loader)
iter_count = 0
for iter_num, sample in enumerate(iterator):
total_iter = (epoch_count * len(data_loader)) + iter_num
if ((total_iter > 1) and total_iter % test_iter == 0) or (test_init and total_iter == 0):
# epoch_count != 0 and
print("Test phase")
net = net.eval()
if do_ema:
net.start_test()
synth_acc, synth_avg_ed, synth_avg_no_stop_ed, synth_avg_loss = test(net, synth_eval_data,
synth_eval_data.get_lexicon(),
cuda, visualize=False,
dataset_info=dataset_info,
batch_size=batch_size,
tb_writer=tb_writer,
n_iter=total_iter,
initial_title='val_synth',
loss_function=loss_function,
output_path=os.path.join(
output_dir, 'results'),
do_beam_search=False)
orig_acc, orig_avg_ed, orig_avg_no_stop_ed, orig_avg_loss = test(net, orig_eval_data,
orig_eval_data.get_lexicon(), cuda,
visualize=False,
dataset_info=dataset_info,
batch_size=batch_size,
tb_writer=tb_writer, n_iter=total_iter,
initial_title='test_orig',
loss_function=loss_function,
output_path=os.path.join(output_dir,
'results'),
do_beam_search=do_beam_search)
net = net.train()
#save periodic
if output_dir is not None and total_iter % 30000 == 0:  # save every 30k iterations
periodic_save = os.path.join(output_dir, 'periodic_save')
os.makedirs(periodic_save, exist_ok=True)
old_save = glob.glob(os.path.join(periodic_save,'*'))
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_" + str(total_iter)))
if orig_avg_no_stop_ed < orig_avg_ed_best:
orig_avg_ed_best = orig_avg_no_stop_ed
if output_dir is not None:
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_best"))
if synth_avg_no_stop_ed < synth_avg_ed_best:
synth_avg_ed_best = synth_avg_no_stop_ed
if do_ema:
net.end_test()
print("synth: avg_ed_best: {}\t avg_ed: {}; avg_nostop_ed: {}; acc: {}".format(synth_avg_ed_best,
synth_avg_ed,
synth_avg_no_stop_ed,
synth_acc))
print("orig: avg_ed_best: {}\t avg_ed: {}; avg_nostop_ed: {}; acc: {}".format(orig_avg_ed_best,
orig_avg_ed,
orig_avg_no_stop_ed,
orig_acc))
tb_writer.get_writer().add_scalars('data/test',
{'synth_ed_total': synth_avg_ed,
'synth_ed_no_stop': synth_avg_no_stop_ed,
'synth_avg_loss': synth_avg_loss,
'orig_ed_total': orig_avg_ed,
'orig_ed_no_stop': orig_avg_no_stop_ed,
'orig_avg_loss': orig_avg_loss
}, total_iter)
if len(loss_mean_ctc) > 0:
train_dict = {'mean_ctc_loss': np.mean(loss_mean_ctc)}
if do_vat:
train_dict = {**train_dict, **{'mean_vat_loss':np.mean(loss_mean_vat)}}
if do_at:
train_dict = {**train_dict, **{'mean_at_loss':np.mean(loss_mean_at)}}
if do_test_vat:
train_dict = {**train_dict, **{'mean_test_vat_loss': np.mean(loss_mean_test_vat)}}
if do_test_vat_rnn and do_test_vat_cnn:
train_dict = {**train_dict, **{'mean_test_vat_crnn_loss': np.mean(loss_mean_test_vat)}}
elif do_test_vat_rnn:
train_dict = {**train_dict, **{'mean_test_vat_rnn_loss': np.mean(loss_mean_test_vat)}}
elif do_test_vat_cnn:
train_dict = {**train_dict, **{'mean_test_vat_cnn_loss': np.mean(loss_mean_test_vat)}}
if ada_after_rnn:
train_dict = {**train_dict,
**{'mean_ada_rnn_s_loss': np.mean(loss_mean_ada_rnn_s),
'mean_ada_rnn_t_loss': np.mean(loss_mean_ada_rnn_t)}}
if ada_before_rnn:
train_dict = {**train_dict,
**{'mean_ada_cnn_s_loss': np.mean(loss_mean_ada_cnn_s),
'mean_ada_cnn_t_loss': np.mean(loss_mean_ada_cnn_t)}}
print(train_dict)
tb_writer.get_writer().add_scalars('data/train',
train_dict,
total_iter)
'''
# for multi-gpu support
if sample["img"].size(0) % len(gpu.split(',')) != 0:
continue
'''
optimizer.zero_grad()
imgs = Variable(sample["img"])
#print("images sizes are:")
#print(sample["img"].shape)
if do_vat or ada_after_rnn or ada_before_rnn:
mask = sample['mask']
labels_flatten = Variable(sample["seq"]).view(-1)
label_lens = Variable(sample["seq_len"].int())
#print("image sequence length is:")
#print(sample["im_seq_len"])
#print("label sequence length is:")
#print(sample["seq_len"].view(1,-1))
img_seq_lens = sample["im_seq_len"]
if cuda:
imgs = imgs.cuda()
if do_vat or ada_after_rnn or ada_before_rnn:
mask = mask.cuda()
if do_ada_lr:
ada_p = float(iter_count) / max_iter
lr_scheduler.update(ada_p)
if ada_before_rnn or ada_after_rnn:
if not do_ada_lr:
ada_p = float(iter_count) / max_iter
ada_alpha = 2. / (1. + np.exp(-10. * ada_p)) - 1
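# Standard DANN lambda schedule (Ganin & Lempitsky): ada_alpha grows smoothly
# from 0 to 1 with training progress ada_p, suppressing the gradient-reversal
# signal early in training and giving it full weight near the end.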
if cur_ada >= ada_len:
ada_load = DataLoader(orig_ada_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_ada)
ada_len = len(ada_load)
cur_ada = 0
ada_iter = iter(ada_load)
ada_batch = next(ada_iter)
cur_ada += 1
ada_imgs = Variable(ada_batch["img"])
ada_img_seq_lens = ada_batch["im_seq_len"]
ada_mask = ada_batch['mask'].byte()
if cuda:
ada_imgs = ada_imgs.cuda()
_, ada_cnn, ada_rnn = net(ada_imgs, ada_img_seq_lens,
ada_alpha=ada_alpha, mask=ada_mask)
if ada_before_rnn:
ada_num_features = ada_cnn.size(0)
else:
ada_num_features = ada_rnn.size(0)
domain_label = torch.zeros(ada_num_features)
domain_label = domain_label.long()
if cuda:
domain_label = domain_label.cuda()
domain_label = Variable(domain_label)
if ada_before_rnn:
err_ada_cnn_t = loss_domain(ada_cnn, domain_label)
if ada_after_rnn:
err_ada_rnn_t = loss_domain(ada_rnn, domain_label)
if do_test_vat and do_at:
# test part!
if cur_vat >= vat_len:
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
test_vat_batch = next(vat_iter)
cur_vat += 1
test_vat_mask = test_vat_batch['mask']
test_vat_imgs = Variable(test_vat_batch["img"])
test_vat_img_seq_lens = test_vat_batch["im_seq_len"]
if cuda:
test_vat_imgs = test_vat_imgs.cuda()
test_vat_mask = test_vat_mask.cuda()
# train part
at_test_vat_loss = LabeledAtAndUnlabeledTestVatLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
at_loss, test_vat_loss = at_test_vat_loss(model=net, train_x=imgs, train_labels_flatten=labels_flatten,
train_img_seq_lens=img_seq_lens, train_label_lens=label_lens, batch_size=batch_size,
test_x=test_vat_imgs, test_seq_len=test_vat_img_seq_lens, test_mask=test_vat_mask)
elif do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
if cur_vat >= vat_len:
vat_load = DataLoader(orig_vat_data, batch_size=batch_size, num_workers=4, shuffle=True,
collate_fn=collate_vat)
vat_len = len(vat_load)
cur_vat = 0
vat_iter = iter(vat_load)
vat_batch = next(vat_iter)
cur_vat += 1
vat_mask = vat_batch['mask']
vat_imgs = Variable(vat_batch["img"])
vat_img_seq_lens = vat_batch["im_seq_len"]
if cuda:
vat_imgs = vat_imgs.cuda()
vat_mask = vat_mask.cuda()
if do_test_vat:
if do_test_vat_rnn or do_test_vat_cnn:
raise "can only do one of do_test_vat | (do_test_vat_rnn, do_test_vat_cnn)"
if vat_sign:
test_vat_loss = VATLossSign(do_test_entropy=do_test_entropy, xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
else:
test_vat_loss = VATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_rnn and do_test_vat_cnn:
test_vat_loss = VATonRnnCnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_rnn:
test_vat_loss = VATonRnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
elif do_test_vat_cnn:
test_vat_loss = VATonCnnSign(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
if do_test_vat_cnn and do_test_vat_rnn:
test_vat_loss, cnn_lds, rnn_lds = test_vat_loss(net, vat_imgs, vat_img_seq_lens, vat_mask)
elif do_test_vat:
test_vat_loss = test_vat_loss(net, vat_imgs, vat_img_seq_lens, vat_mask)
elif do_vat:
vat_loss = VATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
vat_loss = vat_loss(net, imgs, img_seq_lens, mask)
elif do_at:
at_loss = LabeledATLoss(xi=vat_xi, eps=vat_epsilon, ip=vat_ip)
at_loss = at_loss(net, imgs, labels_flatten, img_seq_lens, label_lens, batch_size)
if ada_after_rnn or ada_before_rnn:
preds, ada_cnn, ada_rnn = net(imgs, img_seq_lens, ada_alpha=ada_alpha, mask=mask)
if ada_before_rnn:
ada_num_features = ada_cnn.size(0)
else:
ada_num_features = ada_rnn.size(0)
domain_label = torch.ones(ada_num_features)
domain_label = domain_label.long()
if cuda:
domain_label = domain_label.cuda()
domain_label = Variable(domain_label)
if ada_before_rnn:
err_ada_cnn_s = loss_domain(ada_cnn, domain_label)
if ada_after_rnn:
err_ada_rnn_s = loss_domain(ada_rnn, domain_label)
else:
preds = net(imgs, img_seq_lens)
'''
if output_dir is not None:
if (show_iter is not None and iter_num != 0 and iter_num % show_iter == 0):
print_data_visuals(net, tb_writer, train_data.get_lexicon(), sample["img"], labels_flatten, label_lens,
preds, ((epoch_count * len(data_loader)) + iter_num))
'''
loss_ctc = loss_function(preds, labels_flatten,
Variable(torch.IntTensor(np.array(img_seq_lens))), label_lens) / batch_size
if loss_ctc.data[0] in [float("inf"), -float("inf")]:
print("warnning: loss should not be inf.")
continue
total_loss = loss_ctc
if do_vat:
#mask = sample['mask']
#if cuda:
# mask = mask.cuda()
#vat_loss = virtual_adversarial_loss(net, imgs, img_seq_lens, mask, is_training=True, do_entropy=False, epsilon=vat_epsilon, num_power_iterations=1,
# xi=1e-6, average_loss=True)
total_loss = total_loss + vat_ratio * vat_loss.cpu()
if do_test_vat or do_test_vat_rnn or do_test_vat_cnn:
total_loss = total_loss + test_vat_ratio * test_vat_loss.cpu()
if ada_before_rnn:
total_loss = total_loss + ada_ratio * err_ada_cnn_s.cpu() + ada_ratio * err_ada_cnn_t.cpu()
if ada_after_rnn:
total_loss = total_loss + ada_ratio * err_ada_rnn_s.cpu() + ada_ratio * err_ada_rnn_t.cpu()
total_loss.backward()
nn.utils.clip_grad_norm(net.parameters(), 10.0)
if -400 < loss_ctc.data[0] < 400:
loss_mean_ctc.append(loss_ctc.data[0])
if -1000 < total_loss.data[0] < 1000:
loss_mean_total.append(total_loss.data[0])
if len(loss_mean_total) > 100:
loss_mean_total = loss_mean_total[-100:]
status = "epoch: {0:5d}; iter_num: {1:5d}; lr: {2:.2E}; loss_mean: {3:.3f}; loss: {4:.3f}".format(epoch_count,
lr_scheduler.last_iter,
lr_scheduler.get_lr(),
np.mean(loss_mean_total),
loss_ctc.data[0])
if ada_after_rnn:
loss_mean_ada_rnn_s.append(err_ada_rnn_s.data[0])
loss_mean_ada_rnn_t.append(err_ada_rnn_t.data[0])
status += "; ladatrnns: {0:.3f}; ladatrnnt: {1:.3f}".format(
err_ada_rnn_s.data[0], err_ada_rnn_t.data[0]
)
if ada_before_rnn:
loss_mean_ada_cnn_s.append(err_ada_cnn_s.data[0])
loss_mean_ada_cnn_t.append(err_ada_cnn_t.data[0])
status += "; ladatcnns: {0:.3f}; ladatcnnt: {1:.3f}".format(
err_ada_cnn_s.data[0], err_ada_cnn_t.data[0]
)
if do_vat:
loss_mean_vat.append(vat_loss.data[0])
status += "; lvat: {0:.3f}".format(
vat_loss.data[0]
)
if do_at:
loss_mean_at.append(at_loss.data[0])
status += "; lat: {0:.3f}".format(
at_loss.data[0]
)
if do_test_vat:
loss_mean_test_vat.append(test_vat_loss.data[0])
status += "; l_tvat: {0:.3f}".format(
test_vat_loss.data[0]
)
if do_test_vat_rnn or do_test_vat_cnn:
loss_mean_test_vat.append(test_vat_loss.data[0])
if do_test_vat_rnn and do_test_vat_cnn:
status += "; l_tvatc: {}".format(
cnn_lds.data[0]
)
status += "; l_tvatr: {}".format(
rnn_lds.data[0]
)
else:
status += "; l_tvat: {}".format(
test_vat_loss.data[0]
)
iterator.set_description(status)
optimizer.step()
if do_lr_step:
lr_scheduler.step()
if do_ema:
net.udate_ema()
iter_count += 1
if output_dir is not None:
torch.save(net.state_dict(), os.path.join(output_dir, "crnn_" + backend + "_last"))
epoch_count += 1
return
if __name__ == '__main__':
main()
| [
"torch.nn.NLLLoss",
"torch.zeros",
"torch.autograd.Variable",
"torch.ones",
"torch.utils.data.DataLoader"
] | 0.4.0 | alexeypechorin/tibetan-transductive | e2356d5c0a7cbc2f2359d9cf5b6b18729fecd8de |
1.6 | import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_insaneDA import \
nnUNetTrainerV2_insaneDA
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_MMS(nnUNetTrainerV2_insaneDA):
def setup_DA_params(self):
super().setup_DA_params()
self.data_aug_params["p_rot"] = 0.7
self.data_aug_params["p_eldef"] = 0.1
self.data_aug_params["p_scale"] = 0.3
self.data_aug_params["independent_scale_factor_for_each_axis"] = True
self.data_aug_params["p_independent_scale_per_axis"] = 0.3
self.data_aug_params["do_additive_brightness"] = True
self.data_aug_params["additive_brightness_mu"] = 0
self.data_aug_params["additive_brightness_sigma"] = 0.2
self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
self.data_aug_params["additive_brightness_p_per_channel"] = 1
self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
self.data_aug_params['gamma_range'] = (0.5, 1.6)
def initialize_network(self):
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.BatchNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.BatchNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
"""def run_training(self):
from batchviewer import view_batch
a = next(self.tr_gen)
view_batch(a['data'])
import IPython;IPython.embed()"""
| [
"torch.cuda.is_available"
] | 1.6.0 | nasyxx/nnUNet | 92d5f2352349eed278e22f7a38cb86b0fccd7c75 |
0.4 |
import torch
import torch.nn as nn
#from .utils import load_state_dict_from_url
from .utils import zerocenter
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
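        # zero-center the activations after each ReLU -- the modification this variant
        # adds on top of the stock torchvision ResNet (zerocenter is imported from .utils)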
out = zerocenter(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
out = zerocenter(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = zerocenter(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = zerocenter(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
out = zerocenter(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = zerocenter(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
# if pretrained:
# state_dict = load_state_dict_from_url(model_urls[arch],
# progress=progress)
# model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
class ZeroCenterEncoder(ResNet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pretrained = False
del self.fc
def forward(self, x):
x0 = self.conv1(x)
x0 = self.bn1(x0)
x0 = self.relu(x0)
x1 = self.maxpool(x0)
x1 = zerocenter(x1)
x1 = self.layer1(x1)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
return [x4, x3, x2, x1, x0]
def load_state_dict(self, state_dict, **kwargs):
state_dict.pop('fc.bias')
state_dict.pop('fc.weight')
super().load_state_dict(state_dict, **kwargs) | [
"torch.nn.Linear",
"torch.flatten",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
] | 0.4.0 | vinnamkim/segmentation_models.pytorch | f967ded34df6fb536e8e8cba9b6491ae63b939f5 |
1.10 | """
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
# `eval_policy` is called by test() below; assumed to live in a local eval_policy.py
# (as in the PPO-for-Beginners layout this file follows).
from eval_policy import eval_policy
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
policy_class - the policy class to use for our actor/critic networks.
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
        # assert(type(env.action_space) == gym.spaces.Box)  # disabled so discrete action spaces are also accepted
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
        # Load a pre-trained lane-keeping actor; note that `actor_model` is not
        # referenced again below, so this block appears to be vestigial.
        actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
        policy = FeedForwardActorNN(5, 2, False)
        policy.load_state_dict(torch.load(actor_model))
        actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
            'batch_infractions': [], # episodic infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
Parameters:
total_timesteps - the total number of timesteps to train for
Return:
None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
i_so_far = 0 # Iterations ran so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
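                # Worked example with clip=0.2: ratio=1.5, A_k=+1 -> min(1.5, 1.2) = 1.2
                # (extra gain from moving far past pi_theta_k is capped); ratio=0.5, A_k=-1
                # -> min(-0.5, -0.8) = -0.8, i.e. the objective stays pessimistic.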
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
Parameters:
None
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
            # Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete state space
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
# The shape will be (num timesteps per episode)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
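        # e.g. ep_rews = [1, 1, 1] with gamma = 0.9 yields rtgs [2.71, 1.9, 1.0]
        # (computed right-to-left: 1.0, then 1 + 0.9*1.0 = 1.9, then 1 + 0.9*1.9 = 2.71)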
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
        self.training_step = 200                        # Sets the number of training steps
# Change any default values to custom values for specified hyperparameters
        for param, val in hyperparameters.items():
            setattr(self, param, val)  # safer than exec'ing a generated assignment string
# Sets the seed if specified
        if self.seed is not None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
| [
"torch.distributions.Categorical",
"torch.nn.MSELoss",
"torch.min",
"torch.clamp",
"torch.manual_seed",
"torch.full",
"torch.tensor",
"torch.load",
"torch.diag",
"torch.distributions.MultivariateNormal",
"torch.exp",
"torch.utils.tensorboard.SummaryWriter"
] | 1.10.0 | britig/S2RL-Policies | b9c74b7f5efec225920c09f7e8e82d8555d61bd9 |
0.1 | # Copyright (c) Facebook, Inc. and its affiliates.
import os
import warnings
# import git
import torch
import yaml
from pythia.common.registry import registry
from pythia.utils.distributed_utils import is_main_process, synchronize
from pythia.utils.general import (ckpt_name_from_core_args,
foldername_from_config_override, updir)
class Checkpoint:
def __init__(self, trainer):
"""
Generates a path for saving model which can also be used for resuming
from a checkpoint.
"""
self.trainer = trainer
self.config = self.trainer.config
self.save_dir = self.config.training_parameters.save_dir
self.model_name = self.config.model
self.ckpt_foldername = ckpt_name_from_core_args(self.config)
self.ckpt_foldername += foldername_from_config_override(self.trainer.args)
self.device = registry.get("current_device")
self.ckpt_prefix = ""
if hasattr(self.trainer.model, "get_ckpt_name"):
self.ckpt_prefix = self.trainer.model.get_ckpt_name() + "_"
self.config["log_foldername"] = self.ckpt_foldername
self.ckpt_foldername = os.path.join(self.save_dir, self.ckpt_foldername)
self.pth_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + self.model_name + getattr(self.config.model_attributes,
self.model_name).code_name + "_final.pth"
)
self.models_foldername = os.path.join(self.ckpt_foldername, "models")
if not os.path.exists(self.models_foldername):
os.makedirs(self.models_foldername)
self.save_config()
self.repo_path = updir(os.path.abspath(__file__), n=3)
# self.repo = git.Repo(self.repo_path)
def save_config(self):
cfg_file = os.path.join(self.ckpt_foldername, "config.yaml")
with open(cfg_file, "w") as f:
# Pop out config_override if present to remove clutter in
# saved configuration yaml file
self.config.pop("config_override", None)
f.write(str(self.config))
def load_state_dict(self):
tp = self.config.training_parameters
if tp.resume_file is not None:
if os.path.exists(tp.resume_file):
self._load(tp.resume_file)
return
else:
raise RuntimeError("{} doesn't exist".format(tp.resume_file))
ckpt_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + "best.ckpt"
)
if tp.resume is True:
if os.path.exists(ckpt_filepath):
self._load(ckpt_filepath)
else:
warnings.warn(
"Tried to resume but checkpoint filepath {} "
"is not present. Skipping.".format(ckpt_filepath)
)
def _load(self, file):
self.trainer.writer.write("Loading checkpoint")
ckpt = self._torch_load(file)
data_parallel = registry.get("data_parallel")
if "model" in ckpt:
ckpt_model = ckpt["model"]
else:
ckpt_model = ckpt
ckpt = {"model": ckpt}
pretrained_mapping = self.config.training_parameters.pretrained_mapping
if not self.config.training_parameters.load_pretrained:
pretrained_mapping = {}
new_dict = {}
# TODO: Move to separate function
for attr in ckpt_model:
if "fa_history" in attr:
new_dict[attr.replace("fa_history", "fa_context")] = ckpt_model[attr]
elif data_parallel is False and attr.startswith("module."):
# In case the ckpt was actually a data parallel model
# replace first module. from dataparallel with empty string
new_dict[attr.replace("module.", "", 1)] = ckpt_model[attr]
else:
new_dict[attr] = ckpt_model[attr]
if len(pretrained_mapping.items()) == 0:
final_dict = new_dict
self.trainer.model.load_state_dict(final_dict)
if "optimizer" in ckpt:
self.trainer.optimizer.load_state_dict(ckpt["optimizer"])
else:
warnings.warn(
"'optimizer' key is not present in the "
"checkpoint asked to be loaded. Skipping."
)
self.trainer.early_stopping.init_from_checkpoint(ckpt)
self.trainer.writer.write("Checkpoint loaded")
if "best_iteration" in ckpt:
self.trainer.current_iteration = ckpt["best_iteration"]
registry.register("current_iteration", self.trainer.current_iteration)
if "best_epoch" in ckpt:
self.trainer.current_epoch = ckpt["best_epoch"]
registry.register("current_epoch", self.trainer.current_epoch)
else:
final_dict = {}
model = self.trainer.model
own_state = model.state_dict()
for key, value in pretrained_mapping.items():
key += "."
value += "."
for attr in new_dict:
for own_attr in own_state:
if (
key in attr
and value in own_attr
and attr.replace(key, "") == own_attr.replace(value, "")
):
self.trainer.writer.write(
"Copying " + attr + " " + own_attr
)
own_state[own_attr].copy_(new_dict[attr])
self.trainer.writer.write("Pretrained model loaded")
def _load_state_dict_mapping(self, ckpt_model):
model = self.trainer.model
attr_mapping = {
"image_feature_encoders": "img_feat_encoders",
"image_feature_embeddings_list": "img_embeddings_list",
"image_text_multi_modal_combine_layer": "multi_modal_combine_layer",
"text_embeddings": "text_embeddings",
"classifier": "classifier",
}
data_parallel = registry.get("data_parallel")
if not data_parallel:
            # iterate over a static copy: mutating a dict while iterating it raises RuntimeError
            for key in list(attr_mapping):
                attr_mapping[key.replace("module.", "")] = attr_mapping[key]
                attr_mapping.pop(key)
for key in attr_mapping:
getattr(model, key).load_state_dict(ckpt_model[attr_mapping[key]])
def _torch_load(self, file):
if "cuda" in str(self.device):
return torch.load(file)
else:
return torch.load(file, map_location=lambda storage, loc: storage)
# def _get_vcs_fields(self):
# """Returns a dict with git fields of the current repository
#
# To reproduce an experiment directly from a checkpoint
#
# 1) Export `config` key as a yaml
# 2) Clone repository and checkout at given commit on given branch
# 3) Any local change (diff) while running the experiment is stored
# in the value with key `git/diff`, output the diff to a `path.diff`
# file and apply the patch to the current state by simply
#
# `patch -p0 < path.diff`
# """
#
# return {
# "git/branch": self.repo.active_branch.name,
# "git/commit_hash": self.repo.head.commit.name_rev,
# "git/commit_author": self.repo.head.commit.author.name,
# "git/commit_message": self.repo.head.commit.message,
# "git/diff": self.repo.git.diff("--no-prefix"),
# }
def save(self, iteration, update_best=False):
# Only save in main process
if not is_main_process():
return
ckpt_filepath = os.path.join(
self.models_foldername, "model_%d.ckpt" % iteration
)
best_ckpt_filepath = os.path.join(
self.ckpt_foldername, self.ckpt_prefix + "best.ckpt"
)
best_iteration = self.trainer.early_stopping.best_monitored_iteration
best_metric = self.trainer.early_stopping.best_monitored_value
ckpt = {
"model": self.trainer.model.state_dict(),
"optimizer": self.trainer.optimizer.state_dict(),
"best_iteration": best_iteration,
"best_metric_value": best_metric,
"config": self.config,
}
# git_metadata_dict = self._get_vcs_fields()
# ckpt.update(git_metadata_dict)
torch.save(ckpt, ckpt_filepath)
if update_best:
torch.save(ckpt, best_ckpt_filepath)
def restore(self):
self.trainer.writer.write("Restoring checkpoint")
best_path = os.path.join(self.ckpt_foldername, self.ckpt_prefix + "best.ckpt")
if os.path.exists(best_path):
ckpt = self._torch_load(best_path)
self.trainer.model.load_state_dict(ckpt["model"])
self.trainer.optimizer.load_state_dict(ckpt["optimizer"])
def finalize(self):
torch.save(self.trainer.model.state_dict(), self.pth_filepath)
| [
"torch.save",
"torch.load"
] | 0.1.6 | zean-wen/mmgnn_textvqa | 2cfe82ed54610975a1d4937f2032e5f4565ecbe7 |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors,
# The HuggingFace Inc. team, and The XTREME Benchmark Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fine-tuning models for NER and POS tagging."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
from dataclasses import dataclass, field
from typing import Optional
import json
import numpy as np
import scipy
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_tag import convert_examples_to_features
from utils_tag import get_labels
from utils_tag import read_examples_from_file
# import lang2vec.lang2vec as l2v  # NOTE: required by get_sim/get_syntax_sim when --lang_to_vec is used
from scipy.spatial import distance
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
WEIGHTS_NAME,
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
HfArgumentParser,
MultiLingAdapterArguments,
AdapterConfig,
AdapterType,
)
#from xlm import XLMForTokenClassification
DEFAULT_LANGUAGES = {
'mr': 'hi',
'bn': 'hi',
'ta': 'ta',
'fo': 'fo',
'no': 'da',
'da': 'da',
'be': 'be',
'uk': 'uk',
'bg': 'bg'
}
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
logger.info(f'Seed = {args.seed}')
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id=None):
"""Train the model."""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
print(f'Local Rank = {args.local_rank}')
print(len(train_dataset))
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
logging.info([n for (n, p) in model.named_parameters() if p.requires_grad])
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
best_score = 0.0
best_checkpoint = None
patience = 0
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
set_seed(args) # Add here for reproductibility (even between python 2 and 3)
cur_epoch = 0
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
cur_epoch += 1
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch if t is not None)
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3]}
if args.model_type != "distilbert":
# XLM and RoBERTa don"t use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == "xlm":
inputs["langs"] = batch[4]
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel training
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scheduler.step() # Update learning rate schedule
optimizer.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training:
# Only evaluate on single GPU otherwise metrics may not average well
results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
if args.save_only_best_checkpoint:
result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name)
if result["f1"] > best_score:
logger.info("result['f1']={} > best_score={}".format(result["f1"], best_score))
best_score = result["f1"]
# Save the best model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-best")
best_checkpoint = output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
model_to_save.save_all_adapters(output_dir)
if args.do_save_adapter_fusions:
model_to_save.save_all_adapter_fusions(output_dir)
if args.do_save_full_model:
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving the best model checkpoint to %s", output_dir)
logger.info("Reset patience to 0")
patience = 0
else:
patience += 1
logger.info("Hit patience={}".format(patience))
if args.eval_patience > 0 and patience > args.eval_patience:
logger.info("early stop! patience={}".format(patience))
epoch_iterator.close()
train_iterator.close()
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
else:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
model_to_save.save_all_adapters(output_dir)
if args.do_save_adapter_fusions:
model_to_save.save_all_adapter_fusions(output_dir)
if args.do_save_full_model:
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def calc_weight_multi(args, model, batch, lang_adapter_names, task_name, adapter_weights, step=10, lang=None):
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"return_sequence_out": True,
"labels": batch[3]}
# logger.info(f'Language Adapters are {lang_adapter_names}')
adapter_weights = [torch.FloatTensor([0.5 for _ in range(len(lang_adapter_names))]).to(args.device) for _ in range(13)]
if args.lang_to_vec:
logger.info(lang)
logger.info(lang_adapter_names)
        adapter_weights = calc_l2v_weights(args, lang, lang_adapter_names)
logger.info(adapter_weights)
for step_no in range(step):
for w in adapter_weights: w.requires_grad = True
if args.lang_to_vec and step_no == 0:
normed_adapter_weights = adapter_weights
else:
normed_adapter_weights = [torch.nn.functional.softmax(w) for w in adapter_weights]
# logger.info(f'Initial Adapter Weights = {normed_adapter_weights}')
model.set_active_adapters([lang_adapter_names, [task_name]])
inputs["adapter_names"] = [lang_adapter_names, [task_name]]
inputs["adapter_weights"] = normed_adapter_weights
outputs = model(**inputs)
loss, logits, orig_sequence_output = outputs[:3]
kept_logits = outputs[-1]
entropy = torch.nn.functional.softmax(kept_logits, dim=1)*torch.nn.functional.log_softmax(kept_logits, dim=1)
entropy = -entropy.sum() / kept_logits.size(0)
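        # Test-time adaptation objective: mean predictive entropy of the kept logits
        # (sum of softmax * log_softmax over classes, averaged over tokens); the adapter
        # mixing weights are then refined by gradient descent on this entropy below.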
grads = torch.autograd.grad(entropy, adapter_weights)
#print(adapter_weights)
#print(grads)
#print(grads)
for i, w in enumerate(adapter_weights):
adapter_weights[i] = adapter_weights[i].data - 10*grads[i].data
normed_adapter_weights = [torch.nn.functional.softmax(w) for w in adapter_weights]
#print(normed_adapter_weights)
# logger.info(f'Final Adapter Weights = {normed_adapter_weights}')
return normed_adapter_weights
def jaccard_sim(vec1, vec2):
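    # Jaccard similarity over binary typological feature vectors; '--' marks a missing
    # feature and is skipped. Worked example (hypothetical vectors):
    #   vec1 = [1, 0, '--', 1], vec2 = [1, 1, 0, 0] -> intersection=1, union=3 -> 1/3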
intersection = 0
union = 0
for i in range(len(vec1)):
if vec1[i] == '--' or vec2[i] == '--':
continue
if vec1[i] == 1 or vec2[i] == 1:
union += 1
if vec1[i] == 1 and vec2[i] == 1:
intersection += 1
return intersection/union
def get_sim(lang1, lang2):
features = l2v.get_features(f'{DEFAULT_LANGUAGES[lang1]} {lang2}', 'learned')
similarity = 1 - distance.cosine(features[DEFAULT_LANGUAGES[lang1]], features[lang2])
return similarity
def get_syntax_sim(lang1, lang2):
features = l2v.get_features(f'{lang1} {lang2}', "syntax_wals|syntax_sswl|syntax_ethnologue")
similarity = jaccard_sim(features[lang1], features[lang2])
return similarity
def calc_l2v_weights(args, lang, lang_adapter_names):
adapter_weight = []
for adapter_lang in lang_adapter_names:
if args.en_weight is not None and adapter_lang == 'en':
continue
if args.lang_to_vec == 'learned':
adapter_weight.append(get_sim(lang, adapter_lang))
elif args.lang_to_vec == 'syntax':
adapter_weight.append(get_syntax_sim(lang, adapter_lang))
else:
      logger.error('Invalid lang_to_vec feature type: {}'.format(args.lang_to_vec))
exit()
logger.info(adapter_weight)
adapter_weight = torch.FloatTensor(adapter_weight)
  adapter_weight = torch.nn.functional.softmax(adapter_weight/args.temperature, dim=-1).tolist()
if args.en_weight is not None:
adapter_weight = [(1 - args.en_weight)*aw for aw in adapter_weight]
en_index = lang_adapter_names.index('en')
adapter_weight.insert(en_index, args.en_weight)
return adapter_weight
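# Illustrative sketch of calc_l2v_weights (added for clarity, not in the
# original script). With lang_adapter_names = ['en', 'hi'] and
# args.en_weight = 0.4, the non-English similarities are softmaxed (a single
# entry becomes 1.0), rescaled by (1 - 0.4), and the fixed English weight is
# re-inserted at the English adapter's position:
#
#   weights = calc_l2v_weights(args, 'ta', ['en', 'hi'])
#   # -> [0.4, 0.6]   (assuming args.lang_to_vec == 'learned')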
def scaled_input(emb, batch_size=16, num_batch=1, baseline=None, start_i=None, end_i=None):
# shape of emb: (num_head, seq_len, seq_len)
if baseline is None:
baseline = torch.zeros_like(emb)
num_points = batch_size * num_batch
scale = 1.0 / num_points
if start_i is None:
step = (emb.unsqueeze(0) - baseline.unsqueeze(0)) * scale
res = torch.cat([torch.add(baseline.unsqueeze(0), step*i) for i in range(num_points)], dim=0)
return res, step[0]
else:
step = (emb - baseline) * scale
start_emb = torch.add(baseline, step*start_i)
end_emb = torch.add(baseline, step*end_i)
step_new = (end_emb.unsqueeze(0) - start_emb.unsqueeze(0)) * scale
res = torch.cat([torch.add(start_emb.unsqueeze(0), step_new*i) for i in range(num_points)], dim=0)
return res, step_new[0]
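def _demo_scaled_input():
  """Minimal sketch (added for illustration, not in the original script).

  ``scaled_input`` builds the straight-line interpolation path from a zero
  baseline to an attention map, as used by integrated-gradients style head
  attribution; the shapes below are assumptions chosen for the example.
  """
  att = torch.rand(12, 128, 128)  # (num_head, seq_len, seq_len)
  scale_att, step = scaled_input(att, batch_size=16, num_batch=1)
  assert scale_att.shape == (16, 12, 128, 128)
  # walking all num_points steps from the baseline recovers the original map
  assert torch.allclose(step * 16, att)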
#Changed the default of calc_weight_step to 0
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix="", lang="en", lang2id=None, print_result=True, adapter_weight=None, lang_adapter_names=None, task_name=None, calc_weight_step=0):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode, lang=lang, lang2id=lang2id)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
if args.get_attr:
eval_sampler = RandomSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
else:
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s in %s *****" % (prefix, lang))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
counter = 0
head_importances = None
all_head_importances = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
counter += 1
logger.info(f'Batch number = {counter}')
batch = tuple(t.to(args.device) for t in batch)
if calc_weight_step > 0:
adapter_weight = calc_weight_multi(args, model, batch, lang_adapter_names, task_name, adapter_weight, calc_weight_step, lang=lang)
if args.get_attr:
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"adapter_weights": adapter_weight}
if args.model_type != "distilbert":
        # XLM and RoBERTa don't use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[4]
inputs["output_attentions"] = True
outputs = model(**inputs)
tmp_eval_loss, logits, attentions, kept_labels, kl_logits = outputs
attr_all = []
res_attr = []
input_len = int(inputs["attention_mask"][0].sum())
example_head_importances = None
#Remove the batch_size dim since batch_size=1
logits = logits[0]
for tar_layer in range(12):
att = attentions[tar_layer][0]
pred_labels = torch.argmax(logits, dim=-1)
scale_att, step = scaled_input(att.data)
scale_att.requires_grad_(True)
attr_all = None
prob_all = None
for j_batch in range(1):
one_batch_att = scale_att[j_batch*16:(j_batch+1)*16]
_, grad = model(input_ids=inputs['input_ids'], token_type_ids=inputs['token_type_ids'], attention_mask=inputs['attention_mask'], labels=inputs['labels'], tar_layer=tar_layer, tmp_score=one_batch_att, pred_labels=pred_labels)
grad = grad.sum(dim=0)
attr_all = grad if attr_all is None else torch.add(attr_all, grad)
# prob_all = tar_prob if prob_all is None else torch.cat([prob_all, tar_prob])
attr_all = attr_all[:,0:input_len,0:input_len] * step[:,0:input_len,0:input_len]
if example_head_importances is None:
example_head_importances = torch.amax(attr_all, dim=(1,2)).unsqueeze(0)
else:
tmp = torch.amax(attr_all, dim=(1,2))
tmp = tmp.unsqueeze(0)
example_head_importances = torch.cat((example_head_importances, tmp), dim=0)
# att = att[:,0:input_len,0:input_len]
res_attr.append(attr_all.data)
# logger.info(f'Example Head Importances = {example_head_importances}')
all_head_importances = example_head_importances.unsqueeze(0) if all_head_importances is None else torch.cat((all_head_importances, example_head_importances.unsqueeze(0)), dim=0)
head_importances = example_head_importances if head_importances is None else torch.add(head_importances, example_head_importances)
if counter == 100:
break
continue
with torch.no_grad():
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"adapter_weights": adapter_weight}
# logger.info(f'Labels = {batch[3]}')
if args.model_type != "distilbert":
        # XLM and RoBERTa don't use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[4]
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel evaluating
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if args.get_attr:
head_importances = head_importances/counter
logger.info(f'Head Importances = {head_importances}')
torch.save(head_importances, os.path.join(args.output_dir,f'{mode}_{lang}_s{args.seed}_importances_100.pt'))
torch.save(all_head_importances, os.path.join(args.output_dir,f'{mode}_{lang}_s{args.seed}_all_importances_100.pt'))
return None, None
  if nb_eval_steps == 0:
    results = {k: 0 for k in ["loss", "precision", "recall", "f1"]}
    preds_list = []
else:
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list)
}
if print_result:
logger.info("***** Evaluation result %s in %s *****" % (prefix, lang))
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode, lang, lang2id=None, few_shot=-1):
  # Make sure only the first process in distributed training processes
  # the dataset; the others will use the cache
  if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier()
# Load data features from cache or dataset file
bpe_dropout = args.bpe_dropout
if mode != 'train': bpe_dropout = 0
if bpe_dropout > 0:
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}_drop{}".format(mode, lang,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length), bpe_dropout))
else:
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}".format(mode, lang,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length)))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
langs = lang.split(',')
logger.info("all languages = {}".format(lang))
features = []
for lg in langs:
data_file = os.path.join(args.data_dir, lg, "{}.{}".format(mode, args.model_name_or_path))
logger.info("Creating features from dataset file at {} in language {}".format(data_file, lg))
examples = read_examples_from_file(data_file, lg, lang2id)
      logger.info("Read {} examples from {}".format(len(examples), data_file))
features_lg = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta", "xlmr"]),
pad_on_left=bool(args.model_type in ["xlnet"]),
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
lang=lg,
bpe_dropout=bpe_dropout,
)
features.extend(features_lg)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file {}, len(features)={}".format(cached_features_file, len(features)))
torch.save(features, cached_features_file)
  # Make sure only the first process in distributed training processes
  # the dataset; the others will use the cache
  if args.local_rank == 0 and mode == "train":
torch.distributed.barrier()
if few_shot > 0 and mode == 'train':
logger.info("Original no. of examples = {}".format(len(features)))
features = features[: few_shot]
logger.info('Using few-shot learning on {} examples'.format(len(features)))
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
if args.model_type == 'xlm' and features[0].langs is not None:
all_langs = torch.tensor([f.langs for f in features], dtype=torch.long)
logger.info('all_langs[0] = {}'.format(all_langs[0]))
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_langs)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
  model_type: Optional[str] = field(
    default=None, metadata={"help": "Model type; if not given, it is inferred from the pretrained config"}
  )
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
  labels: str = field(
    default=None, metadata={"help": "Path to a file containing all labels. If not specified, default NER/POS labels are used."}
  )
  data_dir: str = field(
    default=None, metadata={"help": "The input data dir. Should contain the training files for the NER/POS task."}
  )
  output_dir: str = field(
    default=None, metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
  )
  max_seq_length: Optional[int] = field(
    default=128, metadata={"help": "The maximum total input sequence length after tokenization. Longer sequences are truncated, shorter ones are padded."}
  )
  do_train: Optional[bool] = field(default=False)
  do_eval: Optional[bool] = field(default=False)
  do_predict: Optional[bool] = field(default=False)
  do_adapter_predict: Optional[bool] = field(default=False)
  do_predict_dev: Optional[bool] = field(default=False)
  do_predict_train: Optional[bool] = field(default=False)
  init_checkpoint: Optional[str] = field(default=None)
  evaluate_during_training: Optional[bool] = field(default=False)
  do_lower_case: Optional[bool] = field(default=False)
  few_shot: Optional[int] = field(default=-1)
per_gpu_train_batch_size: Optional[int] = field(default=8)
per_gpu_eval_batch_size: Optional[int] = field(default=8)
gradient_accumulation_steps: Optional[int] = field(default=1)
learning_rate: Optional[float] = field(default=5e-5)
weight_decay: Optional[float] = field(default=0.0)
adam_epsilon: Optional[float] = field(default=1e-8)
max_grad_norm: Optional[float] = field(default=1.0)
num_train_epochs: Optional[float] = field(default=3.0)
max_steps: Optional[int] = field(default=-1)
save_steps: Optional[int] = field(default=-1)
warmup_steps: Optional[int] = field(default=0)
logging_steps: Optional[int] = field(default=50)
save_only_best_checkpoint: Optional[bool] = field(default=False)
eval_all_checkpoints: Optional[bool] = field(default=False)
no_cuda: Optional[bool] = field(default=False)
overwrite_output_dir: Optional[bool] = field(default=False)
overwrite_cache: Optional[bool] = field(default=False)
seed: Optional[int] = field(default=42)
fp16: Optional[bool] = field(default=False)
fp16_opt_level: Optional[str] = field(default="O1")
local_rank: Optional[int] = field(default=-1)
server_ip: Optional[str] = field(default="")
server_port: Optional[str] = field(default="")
predict_langs: Optional[str] = field(default="en")
train_langs: Optional[str] = field(default="en")
log_file: Optional[str] = field(default=None)
eval_patience: Optional[int] = field(default=-1)
bpe_dropout: Optional[float] = field(default=0)
do_save_adapter_fusions: Optional[bool] = field(default=False)
task_name: Optional[str] = field(default="ner")
predict_task_adapter: Optional[str] = field(default=None)
predict_lang_adapter: Optional[str] = field(default=None)
test_adapter: Optional[bool] = field(default=False)
adapter_weight: Optional[str] = field(default=None)
lang_to_vec: Optional[str] = field(default=None)
calc_weight_step: Optional[int] = field(default=0)
predict_save_prefix: Optional[str] = field(default=None)
en_weight: Optional[float] = field(default=None)
temperature: Optional[float] = field(default=1.0)
get_attr: Optional[bool] = field(default=False)
topk: Optional[int] = field(default=1)
task: Optional[str] = field(default='udpos')
def setup_adapter(args, adapter_args, model, train_adapter=True, load_adapter=None, load_lang_adapter=None):
task_name = args.task_name or "ner"
# check if adapter already exists, otherwise add it
if task_name not in model.config.adapters.adapter_list(AdapterType.text_task):
logging.info("Trying to decide if add adapter")
# resolve the adapter config
adapter_config = AdapterConfig.load(
adapter_args.adapter_config,
non_linearity=adapter_args.adapter_non_linearity,
reduction_factor=adapter_args.adapter_reduction_factor,
)
# load a pre-trained from Hub if specified
if adapter_args.load_adapter or load_adapter:
logging.info("loading task adapter")
model.load_adapter(
adapter_args.load_adapter if load_adapter is None else load_adapter,
AdapterType.text_task,
config=adapter_config,
load_as=task_name,
)
# otherwise, add a fresh adapter
else:
logging.info("Adding task adapter")
model.add_adapter(task_name, AdapterType.text_task, config=adapter_config)
# optionally load a pre-trained language adapter
if adapter_args.load_lang_adapter or load_lang_adapter:
if load_lang_adapter is None:
# load a set of language adapters
logging.info("loading lang adpater {}".format(adapter_args.load_lang_adapter))
# resolve the language adapter config
lang_adapter_config = AdapterConfig.load(
adapter_args.lang_adapter_config,
non_linearity=adapter_args.lang_adapter_non_linearity,
reduction_factor=adapter_args.lang_adapter_reduction_factor,
)
# load the language adapter from Hub
# if adapter_args.language == 'topk':
# assert len(args.predict_langs.split(',')) == 1
# filename = f'scripts/{args.task}/en/{args.predict_langs}.json'
# logger.info(f'Loading Adapter Languages from {filename}')
# languages = []
# with open(filename) as f:
# for i,line in enumerate(f):
# if i == args.topk:
# break
# line = json.loads(line)
# languages.append(line['adapter'].strip())
# adapter_names = [f'{lang}/wiki@ukp' for lang in languages]
# else:
# languages = adapter_args.language.split(",")
# adapter_names = adapter_args.load_lang_adapter.split(",")
# logger.info(f'Adapter Languages : {languages}, Length : {len(languages)}')
# logger.info(f'Adapter Names {adapter_names}, Length : {len(adapter_names)}')
# assert len(languages) == len(adapter_names)
# lang_adapter_names = []
# for language, adapter_name in zip(languages, adapter_names):
# logger.info(f'Language = {language}')
# logger.info(f'Adapter Name = {adapter_name}')
# lang_adapter_name = model.load_adapter(
# adapter_name,
# AdapterType.text_lang,
# config=lang_adapter_config,
# load_as=language,
# )
# lang_adapter_names.append(lang_adapter_name)
else:
logging.info("loading lang adpater {}".format(load_lang_adapter))
# resolve the language adapter config
lang_adapter_config = AdapterConfig.load(
adapter_args.lang_adapter_config,
non_linearity=adapter_args.lang_adapter_non_linearity,
reduction_factor=adapter_args.lang_adapter_reduction_factor,
)
# load the language adapter from Hub
# lang_adapter_name = model.load_adapter(
# load_lang_adapter,
# AdapterType.text_lang,
# config=lang_adapter_config,
# load_as="lang",
# )
# lang_adapter_names = [lang_adapter_name]
else:
lang_adapter_name = None
lang_adapter_names = []
# Freeze all model weights except of those of this adapter
model.train_adapter([task_name])
# Set the adapters to be used in every forward pass
if lang_adapter_name:
model.set_active_adapters([lang_adapter_names, [task_name]])
else:
model.set_active_adapters([task_name])
return model, lang_adapter_names, task_name
def load_model(args, num_labels):
logger.info('Loading pretrained model and tokenizer')
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
cache_dir=args.cache_dir,
)
args.model_type = config.model_type
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir,
use_fast=False,
)
if args.init_checkpoint:
logger.info("loading from init_checkpoint={}".format(args.init_checkpoint))
model = AutoModelForTokenClassification.from_pretrained(
args.init_checkpoint,
config=config,
cache_dir=args.cache_dir,
)
else:
logger.info("loading from existing model {}".format(args.model_name_or_path))
model = AutoModelForTokenClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
lang2id = config.lang2id if args.model_type == "xlm" else None
logger.info("Using lang2id = {}".format(lang2id))
return model, tokenizer, lang2id
def predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, split):
output_test_results_file = os.path.join(args.output_dir, f"{split}_results.txt")
with open(output_test_results_file, "a") as result_writer:
for lang in args.predict_langs.split(','):
#Check if language data exists
if not os.path.exists(os.path.join(args.data_dir, lang, '{}.{}'.format(split, args.model_name_or_path))):
logger.info("Language {}, split {} does not exist".format(lang, split))
continue
#Activate the required language adapter
adapter_weight = None
# if not args.adapter_weight and not args.lang_to_vec:
# if (adapter_args.train_adapter or args.test_adapter) and not args.adapter_weight:
# if lang in lang_adapter_names:
# logger.info(f'Language adapter for {lang} found')
# logger.info("Set active language adapter to {}".format(lang))
# model.set_active_adapters([[lang], [task_name]])
# else:
# logger.info(f'Language adapter for {lang} not found, using {lang_adapter_names[0]} instead')
# logger.info("Set active language adapter to {}".format(lang_adapter_names[0]))
# model.set_active_adapters([[lang_adapter_names[0]], [task_name]])
# else:
# if args.adapter_weight == 'equal':
# adapter_weight = [1/len(lang_adapter_names) for _ in lang_adapter_names]
# elif args.adapter_weight == 'equal_en':
# assert 'en' in lang_adapter_names, 'English language adapter not included'
# adapter_weight = [(1-args.en_weight)/(len(lang_adapter_names)-1) for _ in lang_adapter_names]
# en_index = lang_adapter_names.index('en')
# adapter_weight[en_index] = args.en_weight
# elif args.lang_to_vec:
# if args.en_weight is not None:
# logger.info(lang_adapter_names)
# assert 'en' in lang_adapter_names, 'English language adapter not included'
# adapter_weight = calc_l2v_weights(args, lang, lang_adapter_names)
# elif args.adapter_weight == 'load':
# filename = f'weights/{args.task}/{lang}/weights_s{args.seed}'
# logger.info(f'Loading adapter weights from {filename}')
# with open(filename) as f:
# adapter_weight = json.loads(next(f))
# elif args.adapter_weight != "0" and args.adapter_weight is not None:
# adapter_weight = [float(w) for w in args.adapter_weight.split(",")]
logger.info('Args Adapter Weight = {}'.format(args.adapter_weight))
logger.info('Adapter Languages = {}'.format(lang_adapter_names))
if adapter_weight is not None:
logger.info("Adapter Weights = {}".format(adapter_weight))
logger.info('Sum of Adapter Weights = {}'.format(sum(adapter_weight)))
logger.info("Length of Adapter Weights = {}".format(len(adapter_weight)))
# model.set_active_adapters([ lang_adapter_names, [task_name]])
#Evaluate
result, predictions = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=split, lang=lang, lang2id=lang2id, adapter_weight=adapter_weight, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
if args.get_attr:
continue
result_json = {}
# Save results
if args.predict_save_prefix is not None and args.predict_save_prefix:
result_json['language'] = f'{args.predict_save_prefix}_{lang}'
else:
result_json['language'] = f'{lang}'
result_json['seed'] = args.seed
result_json['language_adapters'] = lang_adapter_names
if args.adapter_weight:
result_json['adapter_weights'] = args.adapter_weight
for key in sorted(result.keys()):
result_json[key] = result[key]
result_writer.write(json.dumps(result_json) + '\n')
# Save predictions
if args.predict_save_prefix is not None and args.predict_save_prefix:
output_test_predictions_file = os.path.join(args.output_dir, "{}_{}_{}_s{}_predictions.txt".format(split, args.predict_save_prefix, lang, args.seed))
else:
output_test_predictions_file = os.path.join(args.output_dir, "{}_{}_s{}_predictions.txt".format(split, lang, args.seed))
infile = os.path.join(args.data_dir, lang, "{}.{}".format(split, args.model_name_or_path))
idxfile = infile + '.idx'
save_predictions(args, predictions, output_test_predictions_file, infile, idxfile)
def main():
  parser = HfArgumentParser((ModelArguments, MultiLingAdapterArguments))
args, adapter_args = parser.parse_args_into_dataclasses()
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else:
    # Initializes the distributed backend, which synchronizes nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(handlers = [logging.FileHandler(args.log_file), logging.StreamHandler()],
format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logging.info("Input args: %r" % args)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare NER/POS task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id
# so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
# Make sure only the first process in distributed training loads model/vocab
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
  args.do_save_full_model = not adapter_args.train_adapter
  args.do_save_adapters = adapter_args.train_adapter
if args.do_save_adapters:
logging.info('save adapters')
logging.info(adapter_args.train_adapter)
if args.do_save_full_model:
logging.info('save model')
# Make sure only the first process in distributed training loads model/vocab
if args.local_rank == 0:
torch.distributed.barrier()
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
model, tokenizer, lang2id = load_model(args, num_labels)
if adapter_args.train_adapter:
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model)
logger.info("lang adapter names: {}".format(" ".join(lang_adapter_names)))
else:
      lang_adapter_names = []
task_name = None
model.to(args.device)
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train", lang=args.train_langs, lang2id=lang2id, few_shot=args.few_shot)
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id, lang_adapter_names, task_name, lang2id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use default names for the model,
# you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
# Save model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
logger.info("Saving model checkpoint to %s", args.output_dir)
model_to_save = model.module if hasattr(model, "module") else model
if args.do_save_adapters:
logging.info("Save adapter")
model_to_save.save_all_adapters(args.output_dir)
if args.do_save_adapter_fusions:
logging.info("Save adapter fusion")
model_to_save.save_all_adapter_fusions(args.output_dir)
if args.do_save_full_model:
logging.info("Save full model")
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Initialization for evaluation
results = {}
if args.init_checkpoint:
best_checkpoint = args.init_checkpoint
elif os.path.exists(os.path.join(args.output_dir, 'checkpoint-best')):
best_checkpoint = os.path.join(args.output_dir, 'checkpoint-best')
else:
best_checkpoint = args.output_dir
# Evaluation
#This evaluates only if the entire model is saved, something we are not doing
if args.do_eval and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
    logger.info('Evaluating the model on the dev set of the training language (en)')
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter)
model.to(args.device)
result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix='debugging', lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
results.update(result)
# for checkpoint in checkpoints:
# global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
# model = AutoModelForTokenClassification.from_pretrained(checkpoint)
# if adapter_args.train_adapter:
# load_adapter = checkpoint + "/" + args.task_name
# load_lang_adapter = "{}/{}".format(checkpoint, adapter_args.language)
# model.model_name = args.model_name_or_path
# model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter)
#
# model.to(args.device)
# result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=global_step, lang=args.train_langs, lang2id=lang2id, lang_adapter_names=lang_adapter_names, task_name=task_name, calc_weight_step=args.calc_weight_step)
# if result["f1"] > best_f1:
# best_checkpoint = checkpoint
# best_f1 = result["f1"]
# if global_step:
# result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
# results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
# writer.write("best checkpoint = {}, best f1 = {}\n".format(best_checkpoint, best_f1))
if args.do_predict and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
# Prediction
logger.info('Evaluating the model on test set of all the languages specified')
#Set up the task adapter
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'test')
if args.do_predict_train and args.local_rank in [-1, 0]:
logger.info('Evaluating on the train set of all specified languages')
model, tokenizer, lang2id = load_model(args, num_labels)
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'train')
#Predict dev set
if args.do_predict_dev and args.local_rank in [-1, 0]:
model, tokenizer, lang2id = load_model(args, num_labels)
logger.info('Evaluating on the dev sets of all the specified languages')
#Set up task and language adapters
if adapter_args.train_adapter or args.test_adapter:
load_adapter = (best_checkpoint + "/" + args.task_name) if args.predict_task_adapter is None else args.predict_task_adapter
# load_adapter = 'output/panx/bert-base-multilingual-cased-LR1e-4-epoch100-MaxLen128-TrainLangen_en_s0/checkpoint-best/ner/'
logger.info(f'Task Adapter will be loaded from this path {load_adapter}')
load_lang_adapter = args.predict_lang_adapter
model.model_name = args.model_name_or_path
model, lang_adapter_names, task_name = setup_adapter(args, adapter_args, model, load_adapter=load_adapter, load_lang_adapter=load_lang_adapter)
model.to(args.device)
predict_and_save(args, adapter_args, model, tokenizer, labels, lang2id, pad_token_label_id, lang_adapter_names, task_name, 'dev')
def save_predictions(args, predictions, output_file, text_file, idx_file, output_word_prediction=False):
# Save predictions
with open(text_file, "r") as text_reader, open(idx_file, "r") as idx_reader:
text = text_reader.readlines()
index = idx_reader.readlines()
assert len(text) == len(index)
# Sanity check on the predictions
with open(output_file, "w") as writer:
example_id = 0
prev_id = int(index[0])
for line, idx in zip(text, index):
if line == "" or line == "\n":
example_id += 1
else:
cur_id = int(idx)
output_line = '\n' if cur_id != prev_id else ''
if output_word_prediction:
output_line += line.split()[0] + '\t'
output_line += predictions[example_id].pop(0) + '\n'
writer.write(output_line)
prev_id = cur_id
if __name__ == "__main__":
main()
| [
"torch.distributed.get_world_size",
"torch.cat",
"torch.utils.data.RandomSampler",
"torch.amax",
"torch.cuda.is_available",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel",
"torch.distributed.init_process_group",
"torch.FloatTensor",
"torch.manual_seed",
"torch.autograd.grad",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.zeros_like",
"torch.distributed.get_rank",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.save",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.nn.functional.log_softmax",
"torch.cuda.set_device",
"torch.nn.functional.softmax",
"torch.distributed.barrier",
"torch.utils.data.TensorDataset",
"torch.argmax",
"torch.no_grad",
"torch.add",
"torch.utils.data.distributed.DistributedSampler"
] | 1.0 | rohanshah13/cloud-emea-copy | 12acebc809080e5898ead86a412b17a5272759c2 |
1.4 | import numpy as np
from time import sleep
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.models.common_layers import batch_norm, get_nddr
from core.tasks import get_tasks
from core.utils import AttrDict
from core.utils.losses import poly
class SingleTaskNet(nn.Module):
def __init__(self, cfg, net1, net2):
super(SingleTaskNet, self).__init__()
self.cfg = cfg
self.net1 = net1
self.net2 = net2
assert len(net1.stages) == len(net2.stages)
self.task1, self.task2 = get_tasks(cfg)
self.num_stages = len(net1.stages)
self._step = 0
def step(self):
self._step += 1
def loss(self, x, labels):
label_1, label_2 = labels
result = self.forward(x)
result.loss1 = self.task1.loss(result.out1, label_1)
result.loss2 = self.task2.loss(result.out2, label_2)
result.loss = result.loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.loss2
return result
def forward(self, x):
N, C, H, W = x.size()
y = x.clone()
x = self.net1.base(x)
y = self.net2.base(y)
for stage_id in range(self.num_stages):
x = self.net1.stages[stage_id](x)
y = self.net2.stages[stage_id](y)
x = self.net1.head(x)
y = self.net2.head(y)
return AttrDict({'out1': x, 'out2': y})
class SharedFeatureNet(nn.Module):
def __init__(self, cfg, net1, net2):
super(SharedFeatureNet, self).__init__()
self.cfg = cfg
self.net1 = net1
self.net2 = net2
assert len(net1.stages) == len(net2.stages)
self.task1, self.task2 = get_tasks(cfg)
self.num_stages = len(net1.stages)
self._step = 0
def step(self):
self._step += 1
def loss(self, x, labels):
label_1, label_2 = labels
result = self.forward(x)
result.loss1 = self.task1.loss(result.out1, label_1)
result.loss2 = self.task2.loss(result.out2, label_2)
result.loss = result.loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.loss2
return result
def forward(self, x):
x = self.net1.base(x)
for stage_id in range(self.num_stages):
x = self.net1.stages[stage_id](x)
out1 = self.net1.head(x)
out2 = self.net2.head(x)
return AttrDict({'out1': out1, 'out2': out2})
class NDDRNet(nn.Module):
def __init__(self, cfg, net1, net2):
super(NDDRNet, self).__init__()
self.cfg = cfg
self.net1 = net1
self.net2 = net2
assert len(net1.stages) == len(net2.stages)
self.task1, self.task2 = get_tasks(cfg)
self.num_stages = len(net1.stages)
nddrs = []
total_channels = 0
for stage_id in range(self.num_stages):
out_channels = net1.stages[stage_id].out_channels
assert out_channels == net2.stages[stage_id].out_channels
if stage_id in cfg.TRAIN.AUX_LAYERS:
total_channels += out_channels
nddr = get_nddr(cfg, out_channels, out_channels)
nddrs.append(nddr)
nddrs = nn.ModuleList(nddrs)
self.aux = cfg.TRAIN.AUX
if self.aux:
print("Using shortcut")
self.aux_conv1 = nn.Sequential(
nn.Conv2d(total_channels, 256, kernel_size=3, padding=1, bias=False),
batch_norm(256, eps=1e-03, momentum=cfg.MODEL.BATCH_NORM_MOMENTUM),
nn.ReLU(inplace=True),
nn.Dropout2d(p=0.5),
nn.Conv2d(256, cfg.MODEL.NET1_CLASSES, kernel_size=1)
)
self.aux_conv2 = nn.Sequential(
nn.Conv2d(total_channels, 256, kernel_size=3, padding=1, bias=False),
batch_norm(256, eps=1e-03, momentum=cfg.MODEL.BATCH_NORM_MOMENTUM),
nn.ReLU(inplace=True),
nn.Dropout2d(p=0.5),
nn.Conv2d(256, cfg.MODEL.NET2_CLASSES, kernel_size=1)
)
self.nddrs = nn.ModuleDict({
'nddrs': nddrs,
})
self._step = 0
def step(self):
self._step += 1
def loss(self, x, labels):
label_1, label_2 = labels
result = self.forward(x)
result.loss1 = self.task1.loss(result.out1, label_1)
result.loss2 = self.task2.loss(result.out2, label_2)
result.loss = result.loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.loss2
if self.aux:
result.aux_loss1 = self.task1.loss(result.aux1, label_1)
result.aux_loss2 = self.task2.loss(result.aux2, label_2)
result.aux_loss = result.aux_loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.aux_loss2
result.aux_weight = poly(start=self.cfg.TRAIN.AUX_WEIGHT, end=0.,
steps=self._step, total_steps=self.cfg.TRAIN.STEPS,
period=self.cfg.TRAIN.AUX_PERIOD,
power=1.)
result.loss += result.aux_weight * result.aux_loss
return result
def forward(self, x):
N, C, H, W = x.size()
y = x.clone()
x = self.net1.base(x)
y = self.net2.base(y)
xs, ys = [], []
for stage_id in range(self.num_stages):
x = self.net1.stages[stage_id](x)
y = self.net2.stages[stage_id](y)
if isinstance(x, list):
x[0], y[0] = self.nddrs['nddrs'][stage_id](x[0], y[0])
else:
x, y = self.nddrs['nddrs'][stage_id](x, y)
if self.aux and self.training and stage_id in self.cfg.TRAIN.AUX_LAYERS:
xs.append(x)
ys.append(y)
x = self.net1.head(x)
y = self.net2.head(y)
result = AttrDict({'out1': x, 'out2': y})
if self.aux and self.training:
_, _, h, w = x.size()
aux_x = torch.cat([F.interpolate(_x, (h, w), mode='bilinear', align_corners=True) for _x in xs[:-1]] + [xs[-1]],
dim=1)
aux_y = torch.cat([F.interpolate(_y, (h, w), mode='bilinear', align_corners=True) for _y in ys[:-1]] + [ys[-1]],
dim=1)
result.aux1 = self.aux_conv1(aux_x)
result.aux2 = self.aux_conv2(aux_y)
return result
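# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; ``build_backbone`` below is a hypothetical
# helper, not part of this repo). NDDRNet pairs two backbones that each
# expose ``base``, ``stages`` (modules advertising ``out_channels``) and
# ``head``; an NDDR layer fuses the two feature streams after every stage,
# and the optional aux heads consume upsampled features from AUX_LAYERS.
#
#   net1, net2 = build_backbone(cfg), build_backbone(cfg)  # hypothetical
#   model = NDDRNet(cfg, net1, net2)
#   result = model.loss(images, (labels_task1, labels_task2))
#   result.loss.backward()
# ---------------------------------------------------------------------------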
| [
"torch.nn.ModuleList",
"torch.nn.ModuleDict",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Dropout2d"
] | 1.4.0 | WZzhaoyi/MTLNAS | c04fcce1437eef306a41a6a224551be99d88f9a3 |
1.7 | """Provide useful functions for using PTLFlow."""
# =============================================================================
# Copyright 2021 Henrique Morimitsu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
__version__ = '0.2.5'
import logging
from argparse import Namespace
from pathlib import Path
from typing import List, Optional
import requests
import torch
from torch import hub
from ptlflow.models.base_model.base_model import BaseModel
from ptlflow.models.dicl.dicl import DICL
from ptlflow.models.fastflownet.fastflownet import FastFlowNet
from ptlflow.models.flownet.flownet2 import FlowNet2
from ptlflow.models.flownet.flownetc import FlowNetC
from ptlflow.models.flownet.flownetcs import FlowNetCS
from ptlflow.models.flownet.flownetcss import FlowNetCSS
from ptlflow.models.flownet.flownets import FlowNetS
from ptlflow.models.flownet.flownetsd import FlowNetSD
from ptlflow.models.gma.gma import GMA
from ptlflow.models.hd3.hd3 import HD3, HD3Context
from ptlflow.models.irr.pwcnet import IRRPWCNet
from ptlflow.models.irr.pwcnet_irr import IRRPWCNetIRR
from ptlflow.models.irr.irr_pwc import IRRPWC
from ptlflow.models.lcv.lcv_raft import LCV_RAFT, LCV_RAFTSmall
from ptlflow.models.liteflownet.liteflownet import LiteFlowNet
from ptlflow.models.liteflownet.liteflownet3 import (
LiteFlowNet3, LiteFlowNet3PseudoReg, LiteFlowNet3S, LiteFlowNet3SPseudoReg)
from ptlflow.models.liteflownet.liteflownet2 import LiteFlowNet2, LiteFlowNet2PseudoReg
from ptlflow.models.maskflownet.maskflownet import MaskFlownet, MaskFlownet_S
from ptlflow.models.pwcnet.pwcnet import PWCNet, PWCDCNet
from ptlflow.models.raft.raft import RAFT, RAFTSmall
from ptlflow.models.scopeflow.irr_pwc_v2 import ScopeFlow
from ptlflow.models.starflow.starflow import StarFlow
from ptlflow.models.vcn.vcn import VCN, VCNSmall
from ptlflow.utils.utils import config_logging
try:
from ptlflow.models.scv.scv import SCVEighth, SCVQuarter
except ImportError as e:
print(e)
SCVEighth = None
SCVQuarter = None
config_logging()
models_dict = {
'dicl': DICL,
'fastflownet': FastFlowNet,
'flownet2': FlowNet2,
'flownetc': FlowNetC,
'flownetcs': FlowNetCS,
'flownetcss': FlowNetCSS,
'flownets': FlowNetS,
'flownetsd': FlowNetSD,
'gma': GMA,
'hd3': HD3,
'hd3_ctxt': HD3Context,
'irr_pwc': IRRPWC,
'irr_pwcnet': IRRPWCNet,
'irr_pwcnet_irr': IRRPWCNetIRR,
'lcv_raft': LCV_RAFT,
'lcv_raft_small': LCV_RAFTSmall,
'liteflownet': LiteFlowNet,
'liteflownet2': LiteFlowNet2,
'liteflownet2_pseudoreg': LiteFlowNet2PseudoReg,
'liteflownet3': LiteFlowNet3,
'liteflownet3_pseudoreg': LiteFlowNet3PseudoReg,
'liteflownet3s': LiteFlowNet3S,
'liteflownet3s_pseudoreg': LiteFlowNet3SPseudoReg,
'maskflownet': MaskFlownet,
'maskflownet_s': MaskFlownet_S,
'pwcnet': PWCNet,
'pwcdcnet': PWCDCNet,
'raft': RAFT,
'raft_small': RAFTSmall,
'scopeflow': ScopeFlow,
'scv4': SCVQuarter,
'scv8': SCVEighth,
'starflow': StarFlow,
'vcn': VCN,
'vcn_small': VCNSmall,
}
def download_scripts(
destination_dir: Path = Path('ptlflow_scripts')
) -> None:
"""Download the main scripts and configs to start working with PTLFlow."""
github_url = 'https://raw.githubusercontent.com/hmorimitsu/ptlflow/main/'
script_names = [
'datasets.yml',
'infer.py',
'test.py',
'train.py',
'validate.py'
]
destination_dir.mkdir(parents=True, exist_ok=True)
for sname in script_names:
script_url = github_url + sname
data = requests.get(script_url)
if data.status_code == 200:
with open(destination_dir / sname, 'wb') as f:
f.write(data.content)
else:
logging.warning('Script %s was not found.', script_url)
logging.info('Downloaded scripts to %s.', str(destination_dir))
def get_model(
model_name: str,
pretrained_ckpt: Optional[str] = None,
args: Optional[Namespace] = None
) -> BaseModel:
"""Return an instance of a chosen model.
    The instance can be configured by the arguments and can load existing pretrained weights.
    Note that this is different from get_model_reference(), which returns a reference to the model class before
    instantiation. The return of this function is therefore equivalent to "get_model_reference()()", i.e.
    "model_ref = get_model_reference(); return model_ref()".
Parameters
----------
model_name : str
Name of the model to get an instance of.
pretrained_ckpt : Optional[str], optional
Name of the pretrained weight to load or a path to a local checkpoint file.
args : Optional[Namespace], optional
        Some arguments that will be provided to the model.
Returns
-------
BaseModel
The instance of the chosen model.
Raises
------
ValueError
If the given checkpoint name is not a valid choice.
ValueError
If a checkpoint name is given, but the model does not have any pretrained weights available.
See Also
--------
get_model_reference : To get a reference to the class of a model.
"""
model_ref = get_model_reference(model_name)
if args is None:
parser = model_ref.add_model_specific_args()
args = parser.parse_args([])
model = model_ref(args)
    if pretrained_ckpt is None and args is not None and getattr(args, 'pretrained_ckpt', None) is not None:
pretrained_ckpt = args.pretrained_ckpt
if pretrained_ckpt is not None:
if Path(pretrained_ckpt).exists():
ckpt_path = pretrained_ckpt
elif hasattr(model_ref, 'pretrained_checkpoints'):
ckpt_path = model_ref.pretrained_checkpoints.get(pretrained_ckpt)
if ckpt_path is None:
raise ValueError(
f'Invalid checkpoint name {pretrained_ckpt}. '
f'Choose one from {{{",".join(model.pretrained_checkpoints.keys())}}}')
else:
raise ValueError(f'Cannot find checkpoint {pretrained_ckpt} for model {model_name}')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if Path(ckpt_path).exists():
ckpt = torch.load(ckpt_path, map_location=torch.device(device))
else:
model_dir = Path(hub.get_dir()) / 'ptlflow' / 'checkpoints'
ckpt = hub.load_state_dict_from_url(
ckpt_path, model_dir=model_dir, map_location=torch.device(device), check_hash=True)
state_dict = ckpt['state_dict']
model.load_state_dict(state_dict)
return model
def get_model_reference(
model_name: str
) -> BaseModel:
"""Return a reference to the class of a chosen model.
Note that this is different from get_model(), which returns an instance of a model. The reference, returned by this
function, is a class before instantiation. Therefore, the return of this function can be used to instantiate a model as
"model_ref = get_model_reference(); model_instance = model_ref()".
Parameters
----------
model_name : str
Name of the model to get a reference of.
Returns
-------
BaseModel
A reference to the chosen model.
Raises
------
ValueError
If the given name is not a valid choice.
See Also
--------
get_model : To get an instance of a model.
"""
try:
return models_dict[model_name]
except KeyError:
raise ValueError(f'Unknown model name: {model_name}. Choose from [{", ".join(models_dict.keys())}]')
def get_trainable_model_names() -> List[str]:
"""Return a list of model names that are able to be trained.
This function return the names of the model that have a loss function defined.
Returns
=======
List[str]
The list of the model names that can be trained.
"""
return [mname for mname in models_dict.keys() if get_model(mname).loss_fn is not None]
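# Usage sketch (illustrative; the checkpoint name below is an assumption,
# check each model's ``pretrained_checkpoints`` dict for the valid choices):
#
#   model = get_model('raft_small', pretrained_ckpt='things')
#   model_ref = get_model_reference('raft_small')
#   parser = model_ref.add_model_specific_args()
#   args = parser.parse_args([])
#   model = get_model('raft_small', args=args)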
| [
"torch.device",
"torch.cuda.is_available",
"torch.hub.get_dir"
] | 1.7.0 | hmorimitsu/ptlflow | 26f753322aef91b95ad78e743d847064e5d531b9 |
1.4 |
"""Rectify function"""
import torch
from torch.autograd import Function
from encoding import cpu
if torch.cuda.device_count() > 0:
from encoding import gpu
__all__ = ['rectify']
class _rectify(Function):
@staticmethod
def forward(ctx, y, x, kernel_size, stride, padding, dilation, average):
ctx.save_for_backward(x)
        # expand each kernel dimension to its effective (dilated) size
        kernel_size = [k + 2 * (d - 1) for k, d in zip(kernel_size, dilation)]
ctx.kernel_size = kernel_size
ctx.stride = stride
ctx.padding = padding
ctx.dilation = dilation
ctx.average = average
if x.is_cuda:
gpu.conv_rectify(y, x, kernel_size, stride, padding, dilation, average)
else:
cpu.conv_rectify(y, x, kernel_size, stride, padding, dilation, average)
ctx.mark_dirty(y)
return y
@staticmethod
def backward(ctx, grad_y):
x, = ctx.saved_variables
if x.is_cuda:
gpu.conv_rectify(grad_y, x, ctx.kernel_size, ctx.stride,
ctx.padding, ctx.dilation, ctx.average)
else:
cpu.conv_rectify(grad_y, x, ctx.kernel_size, ctx.stride,
ctx.padding, ctx.dilation, ctx.average)
ctx.mark_dirty(grad_y)
return grad_y, None, None, None, None, None, None
rectify = _rectify.apply
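# Usage sketch (illustrative; requires the compiled ``encoding`` extensions,
# and the shapes/arguments below are assumptions). ``rectify`` renormalises a
# convolved/pooled map ``y`` in place, based on how much of each kernel
# window overlaps valid positions of the input ``x``:
#
#   y = rectify(y, x, (3, 3), (1, 1), (1, 1), (1, 1), True)
#   # positional args: kernel_size, stride, padding, dilation, average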
| [
"torch.cuda.device_count"
] | 1.4.0 | Womcos/SCARF | b90251bc23410cb810a7082ca75147a7aae21dec |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from flash.core.data.data_source import DefaultDataKeys
def vissl_collate_helper(samples):
result = []
for batch_ele in samples:
_batch_ele_dict = {}
_batch_ele_dict.update(batch_ele)
_batch_ele_dict[DefaultDataKeys.INPUT] = -1
result.append(_batch_ele_dict)
return torch.utils.data._utils.collate.default_collate(result)
def multicrop_collate_fn(samples):
"""Multi-crop collate function for VISSL integration.
Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT
"""
result = vissl_collate_helper(samples)
inputs = [[] for _ in range(len(samples[0][DefaultDataKeys.INPUT]))]
for batch_ele in samples:
multi_crop_imgs = batch_ele[DefaultDataKeys.INPUT]
for idx, crop in enumerate(multi_crop_imgs):
inputs[idx].append(crop)
for idx, ele in enumerate(inputs):
inputs[idx] = torch.stack(ele)
result[DefaultDataKeys.INPUT] = inputs
return result
def simclr_collate_fn(samples):
"""Multi-crop collate function for VISSL integration.
Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT
"""
result = vissl_collate_helper(samples)
inputs = []
num_views = len(samples[0][DefaultDataKeys.INPUT])
view_idx = 0
while view_idx < num_views:
for batch_ele in samples:
imgs = batch_ele[DefaultDataKeys.INPUT]
inputs.append(imgs[view_idx])
view_idx += 1
result[DefaultDataKeys.INPUT] = torch.stack(inputs)
return result
def moco_collate_fn(samples):
"""MOCO collate function for VISSL integration.
Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT
"""
result = vissl_collate_helper(samples)
inputs = []
for batch_ele in samples:
inputs.append(torch.stack(batch_ele[DefaultDataKeys.INPUT]))
result[DefaultDataKeys.INPUT] = torch.stack(inputs).squeeze()[:, 0, :, :, :].squeeze()
result["data_momentum"] = torch.stack(inputs).squeeze()[:, 1, :, :, :].squeeze()
return result
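def _demo_multicrop_collate():
    """Minimal sketch (added for illustration, not part of the Flash API).

    With 2 fake samples of 3 views each (3x8x8 images, shapes chosen for the
    example), ``multicrop_collate_fn`` returns one stacked tensor per view.
    """
    samples = [
        {DefaultDataKeys.INPUT: [torch.rand(3, 8, 8) for _ in range(3)]}
        for _ in range(2)
    ]
    batch = multicrop_collate_fn(samples)
    assert len(batch[DefaultDataKeys.INPUT]) == 3
    assert batch[DefaultDataKeys.INPUT][0].shape == (2, 3, 8, 8)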
| [
"torch.utils.data._utils.collate.default_collate",
"torch.stack"
] | 1.7.1 | Isaac-Flath/lightning-flash | 320f87707587d92a13c8831778864b33af4fe421 |
1.1 | # Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""GAN-based TTS ESPnet model."""
from contextlib import contextmanager
from distutils.version import LooseVersion
from typing import Any
from typing import Dict
from typing import Optional
import torch
from typeguard import check_argument_types
from espnet2.gan_tts.abs_gan_tts import AbsGANTTS
from espnet2.layers.abs_normalize import AbsNormalize
from espnet2.layers.inversible_interface import InversibleInterface
from espnet2.train.abs_gan_espnet_model import AbsGANESPnetModel
from espnet2.tts.feats_extract.abs_feats_extract import AbsFeatsExtract
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
from torch.cuda.amp import autocast
else:
# Nothing to do if torch < 1.6.0
@contextmanager
def autocast(enabled=True): # NOQA
yield
class ESPnetGANTTSModel(AbsGANESPnetModel):
"""GAN-based TTS ESPnet model."""
def __init__(
self,
feats_extract: Optional[AbsFeatsExtract],
normalize: Optional[AbsNormalize and InversibleInterface],
tts: AbsGANTTS,
):
"""Initialize ESPnetGANTTSModel module."""
assert check_argument_types()
super().__init__()
self.feats_extract = feats_extract
self.normalize = normalize
self.tts = tts
        assert hasattr(
            tts, "generator"
        ), "generator module must be registered as tts.generator"
        assert hasattr(
            tts, "discriminator"
        ), "discriminator module must be registered as tts.discriminator"
def forward(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
forward_generator: bool = True,
) -> Dict[str, Any]:
"""Return generator or discriminator loss with dict format.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker ID tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
forward_generator (bool): Whether to forward generator.
Returns:
Dict[str, Any]:
- loss (Tensor): Loss scalar tensor.
- stats (Dict[str, float]): Statistics to be monitored.
- weight (Tensor): Weight tensor to summarize losses.
- optim_idx (int): Optimizer index (0 for G and 1 for D).
"""
with autocast(False):
# Extract features
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
# Normalize
if self.normalize is not None:
feats, feats_lengths = self.normalize(feats, feats_lengths)
# Make batch for tts inputs
batch = {}
batch.update(text=text, text_lengths=text_lengths)
batch.update(forward_generator=forward_generator)
# Update kwargs for additional auxiliary inputs
if feats is not None:
batch.update(feats=feats, feats_lengths=feats_lengths)
if self.tts.require_raw_speech:
batch.update(speech=speech, speech_lengths=speech_lengths)
if spembs is not None:
batch.update(spembs=spembs)
if sids is not None:
batch.update(sids=sids)
if lids is not None:
batch.update(lids=lids)
return self.tts(**batch)
def collect_feats(
self,
text: torch.Tensor,
text_lengths: torch.Tensor,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
spembs: Optional[torch.Tensor] = None,
sids: Optional[torch.Tensor] = None,
lids: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
"""Calculate features and return them as a dict.
Args:
text (Tensor): Text index tensor (B, T_text).
text_lengths (Tensor): Text length tensor (B,).
speech (Tensor): Speech waveform tensor (B, T_wav).
            speech_lengths (Tensor): Speech length tensor (B,).
spembs (Optional[Tensor]): Speaker embedding tensor (B, D).
sids (Optional[Tensor]): Speaker index tensor (B, 1).
lids (Optional[Tensor]): Language ID tensor (B, 1).
Returns:
Dict[str, Tensor]: Dict of features.
"""
feats = None
if self.feats_extract is not None:
feats, feats_lengths = self.feats_extract(speech, speech_lengths)
feats_dict = {}
if feats is not None:
feats_dict.update(feats=feats, feats_lengths=feats_lengths)
return feats_dict
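# Usage sketch (illustrative; the module instances below are assumptions and
# would normally be built by the espnet2 GAN-TTS task class):
#
#   model = ESPnetGANTTSModel(feats_extract, normalize, tts)
#   g_out = model(text, text_lengths, speech, speech_lengths,
#                 forward_generator=True)   # generator step
#   d_out = model(text, text_lengths, speech, speech_lengths,
#                 forward_generator=False)  # discriminator step
#   g_out["loss"].backward()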
| [
"torch.cuda.amp.autocast"
] | 1.1.0 | actboy/espnet | 66f0f8382b0e1195bed7c280c29711f8436b3db4 |
1.6 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import sys
import tempfile
import unittest
from glob import glob
import nibabel as nib
import numpy as np
import torch
import monai
from monai.data import create_test_image_2d
from monai.engines import GanTrainer
from monai.engines.utils import GanKeys as Keys
from monai.handlers import CheckpointSaver, StatsHandler, TensorBoardStatsHandler
from monai.networks import normal_init
from monai.networks.nets import Discriminator, Generator
from monai.transforms import AsChannelFirstd, Compose, LoadImaged, RandFlipd, ScaleIntensityd, ToTensord
from monai.utils import set_determinism
from tests.utils import DistTestCase, TimedCall, skip_if_quick
def run_training_test(root_dir, device="cuda:0"):
real_images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
train_files = [{"reals": img} for img in zip(real_images)]
# prepare real data
train_transforms = Compose(
[
LoadImaged(keys=["reals"]),
AsChannelFirstd(keys=["reals"]),
ScaleIntensityd(keys=["reals"]),
RandFlipd(keys=["reals"], prob=0.5),
ToTensord(keys=["reals"]),
]
)
train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)
learning_rate = 2e-4
betas = (0.5, 0.999)
real_label = 1
fake_label = 0
# create discriminator
disc_net = Discriminator(
in_shape=(1, 64, 64), channels=(8, 16, 32, 64, 1), strides=(2, 2, 2, 2, 1), num_res_units=1, kernel_size=5
).to(device)
disc_net.apply(normal_init)
disc_opt = torch.optim.Adam(disc_net.parameters(), learning_rate, betas=betas)
disc_loss_criterion = torch.nn.BCELoss()
def discriminator_loss(gen_images, real_images):
real = real_images.new_full((real_images.shape[0], 1), real_label)
gen = gen_images.new_full((gen_images.shape[0], 1), fake_label)
realloss = disc_loss_criterion(disc_net(real_images), real)
genloss = disc_loss_criterion(disc_net(gen_images.detach()), gen)
return torch.div(torch.add(realloss, genloss), 2)
# create generator
latent_size = 64
gen_net = Generator(
latent_shape=latent_size, start_shape=(latent_size, 8, 8), channels=[32, 16, 8, 1], strides=[2, 2, 2, 1]
)
gen_net.apply(normal_init)
gen_net.conv.add_module("activation", torch.nn.Sigmoid())
gen_net = gen_net.to(device)
gen_opt = torch.optim.Adam(gen_net.parameters(), learning_rate, betas=betas)
gen_loss_criterion = torch.nn.BCELoss()
def generator_loss(gen_images):
output = disc_net(gen_images)
cats = output.new_full(output.shape, real_label)
return gen_loss_criterion(output, cats)
key_train_metric = None
train_handlers = [
StatsHandler(
name="training_loss", output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]}
),
TensorBoardStatsHandler(
log_dir=root_dir,
tag_name="training_loss",
output_transform=lambda x: {Keys.GLOSS: x[Keys.GLOSS], Keys.DLOSS: x[Keys.DLOSS]},
),
CheckpointSaver(
save_dir=root_dir, save_dict={"g_net": gen_net, "d_net": disc_net}, save_interval=2, epoch_level=True
),
]
disc_train_steps = 2
num_epochs = 5
trainer = GanTrainer(
device,
num_epochs,
train_loader,
gen_net,
gen_opt,
generator_loss,
disc_net,
disc_opt,
discriminator_loss,
d_train_steps=disc_train_steps,
latent_shape=latent_size,
key_train_metric=key_train_metric,
train_handlers=train_handlers,
)
trainer.run()
return trainer.state
@skip_if_quick
class IntegrationWorkflowsGAN(DistTestCase):
def setUp(self):
set_determinism(seed=0)
self.data_dir = tempfile.mkdtemp()
for i in range(40):
im, _ = create_test_image_2d(64, 64, num_objs=3, rad_max=14, num_seg_classes=1, channel_dim=-1)
n = nib.Nifti1Image(im, np.eye(4))
nib.save(n, os.path.join(self.data_dir, f"img{i:d}.nii.gz"))
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0")
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def tearDown(self):
set_determinism(seed=None)
shutil.rmtree(self.data_dir)
@TimedCall(seconds=200, daemon=False)
def test_training(self):
torch.manual_seed(0)
finish_state = run_training_test(self.data_dir, device=self.device)
# assert GAN training finished
self.assertEqual(finish_state.iteration, 100)
self.assertEqual(finish_state.epoch, 5)
if __name__ == "__main__":
unittest.main()
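# A worked sanity check for the assertions in test_training above: setUp
# writes 40 images, the loader uses batch_size=2, and training runs for
# 5 epochs, hence 40 / 2 * 5 = 100 iterations in the final trainer state.
num_images, batch_size, num_epochs = 40, 2, 5
assert (num_images // batch_size) * num_epochs == 100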
| [
"torch.nn.Sigmoid",
"torch.add",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.nn.BCELoss"
] | 1.6 | Can-Zhao/MONAI | e0db5a564225a7cb62e7a23df97267019006302f |
1.6 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
import torch
from torch import nn
from torch.nn import functional as F
from monai.config.deviceconfig import USE_COMPILED
from monai.networks.layers.spatial_transforms import grid_pull
from monai.networks.utils import meshgrid_ij
from monai.utils import GridSampleMode, GridSamplePadMode, optional_import
_C, _ = optional_import("monai._C")
__all__ = ["Warp", "DVF2DDF"]
class Warp(nn.Module):
"""
Warp an image with given dense displacement field (DDF).
"""
def __init__(self, mode=GridSampleMode.BILINEAR.value, padding_mode=GridSamplePadMode.BORDER.value):
"""
For pytorch native APIs, the possible values are:
- mode: ``"nearest"``, ``"bilinear"``, ``"bicubic"``.
- padding_mode: ``"zeros"``, ``"border"``, ``"reflection"``
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
For MONAI C++/CUDA extensions, the possible values are:
- mode: ``"nearest"``, ``"bilinear"``, ``"bicubic"``, 0, 1, ...
- padding_mode: ``"zeros"``, ``"border"``, ``"reflection"``, 0, 1, ...
See also: :py:class:`monai.networks.layers.grid_pull`
"""
super().__init__()
# resolves _interp_mode for different methods
if USE_COMPILED:
if mode in (inter.value for inter in GridSampleMode):
mode = GridSampleMode(mode)
if mode == GridSampleMode.BILINEAR:
mode = 1
elif mode == GridSampleMode.NEAREST:
mode = 0
elif mode == GridSampleMode.BICUBIC:
mode = 3
else:
mode = 1 # default to linear
self._interp_mode = mode
else:
warnings.warn("monai.networks.blocks.Warp: Using PyTorch native grid_sample.")
self._interp_mode = GridSampleMode(mode).value
# resolves _padding_mode for different methods
if USE_COMPILED:
if padding_mode in (pad.value for pad in GridSamplePadMode):
padding_mode = GridSamplePadMode(padding_mode)
if padding_mode == GridSamplePadMode.ZEROS:
padding_mode = 7
elif padding_mode == GridSamplePadMode.BORDER:
padding_mode = 0
elif padding_mode == GridSamplePadMode.REFLECTION:
padding_mode = 1
else:
padding_mode = 0 # default to border
self._padding_mode = padding_mode
else:
self._padding_mode = GridSamplePadMode(padding_mode).value
@staticmethod
def get_reference_grid(ddf: torch.Tensor) -> torch.Tensor:
mesh_points = [torch.arange(0, dim) for dim in ddf.shape[2:]]
grid = torch.stack(meshgrid_ij(*mesh_points), dim=0) # (spatial_dims, ...)
grid = torch.stack([grid] * ddf.shape[0], dim=0) # (batch, spatial_dims, ...)
grid = grid.to(ddf)
return grid
def forward(self, image: torch.Tensor, ddf: torch.Tensor):
"""
Args:
image: Tensor in shape (batch, num_channels, H, W[, D])
ddf: Tensor in the same spatial size as image, in shape (batch, ``spatial_dims``, H, W[, D])
Returns:
warped_image in the same shape as image (batch, num_channels, H, W[, D])
"""
spatial_dims = len(image.shape) - 2
if spatial_dims not in (2, 3):
raise NotImplementedError(f"got unsupported spatial_dims={spatial_dims}, currently support 2 or 3.")
ddf_shape = (image.shape[0], spatial_dims) + tuple(image.shape[2:])
if ddf.shape != ddf_shape:
raise ValueError(
f"Given input {spatial_dims}-d image shape {image.shape}, " f"the input DDF shape must be {ddf_shape}."
)
grid = self.get_reference_grid(ddf) + ddf
grid = grid.permute([0] + list(range(2, 2 + spatial_dims)) + [1]) # (batch, ..., spatial_dims)
if not USE_COMPILED: # pytorch native grid_sample
for i, dim in enumerate(grid.shape[1:-1]):
grid[..., i] = grid[..., i] * 2 / (dim - 1) - 1
index_ordering: List[int] = list(range(spatial_dims - 1, -1, -1))
grid = grid[..., index_ordering] # z, y, x -> x, y, z
return F.grid_sample(
image, grid, mode=self._interp_mode, padding_mode=f"{self._padding_mode}", align_corners=True
)
# using csrc resampling
return grid_pull(image, grid, bound=self._padding_mode, extrapolate=True, interpolation=self._interp_mode)
class DVF2DDF(nn.Module):
"""
Layer calculates a dense displacement field (DDF) from a dense velocity field (DVF)
with scaling and squaring.
Adapted from:
DeepReg (https://github.com/DeepRegNet/DeepReg)
"""
def __init__(
self, num_steps: int = 7, mode=GridSampleMode.BILINEAR.value, padding_mode=GridSamplePadMode.ZEROS.value
):
super().__init__()
if num_steps <= 0:
raise ValueError(f"expecting positive num_steps, got {num_steps}")
self.num_steps = num_steps
self.warp_layer = Warp(mode=mode, padding_mode=padding_mode)
def forward(self, dvf):
"""
Args:
dvf: dvf to be transformed, in shape (batch, ``spatial_dims``, H, W[,D])
Returns:
a dense displacement field
"""
ddf: torch.Tensor = dvf / (2 ** self.num_steps)
for _ in range(self.num_steps):
ddf = ddf + self.warp_layer(image=ddf, ddf=ddf)
return ddf
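# A minimal smoke-test sketch for the two layers above (an illustration,
# exercising the PyTorch-native grid_sample path when USE_COMPILED is False):
if __name__ == "__main__":
    image = torch.rand(1, 1, 32, 32)        # (batch, num_channels, H, W)
    dvf = 0.5 * torch.randn(1, 2, 32, 32)   # (batch, spatial_dims, H, W)
    ddf = DVF2DDF(num_steps=4)(dvf)         # scaling and squaring: DVF -> DDF
    warped = Warp()(image, ddf)             # resample the image along the DDF
    assert warped.shape == image.shape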
| [
"torch.nn.functional.grid_sample",
"torch.stack",
"torch.arange"
] | 1.6 | Can-Zhao/MONAI | e0db5a564225a7cb62e7a23df97267019006302f |
0.4 | from __future__ import print_function
import argparse
import os
import pickle
import sys
import cv2
import numpy as np
import torch
import vlfeat # calls constructor
from sklearn.cluster import MiniBatchKMeans
from src.utils.cluster.eval_metrics import _hungarian_match, _original_match, \
_acc
from src.utils.segmentation.data import make_Coco_dataloaders, \
make_Potsdam_dataloaders
SIFT_DLEN = 128
SIFT_STEP = 10
def _get_vectorised_sift_samples(archetype_config, dataloader):
# returns num unmasked pixels x SIFT_DLEN, in uint8 format
# operates on greyscale images; each dense-SIFT descriptor is 128-dimensional
num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz
num_imgs_max = num_batches * batch_sz # estimate
img_sz = archetype_config.input_sz
# cluster individual (box central) pixels
desc_side = int(img_sz / SIFT_STEP)
print("img sz %d, desc_side %d" % (img_sz, desc_side))
sys.stdout.flush()
descs_all = np.zeros((num_imgs_max, desc_side * desc_side,
SIFT_DLEN), dtype=np.uint8)
masks_all = np.zeros((num_imgs_max, desc_side * desc_side), dtype=np.bool)
labels_all = None
actual_num_imgs = 0
# when descriptor matrix flattened, goes along rows first (rows change slow)
central_inds_h = (np.arange(desc_side) * SIFT_STEP +
(SIFT_STEP // 2)).reshape((desc_side, 1)).repeat(desc_side,
axis=1)
central_inds_w = (np.arange(desc_side) * SIFT_STEP +
(SIFT_STEP // 2)).reshape((1, desc_side)).repeat(desc_side,
axis=0)
central_inds_h, central_inds_w = central_inds_h.reshape(-1), \
central_inds_w.reshape(-1)
for b_i, batch in enumerate(dataloader):
if len(batch) == 3: # test dataloader
store_labels = True
if (labels_all is None):
labels_all = np.zeros((num_imgs_max, desc_side * desc_side),
dtype=np.int32)
imgs, labels, masks = batch
labels = labels.cpu().numpy().astype(np.int32)
else: # training dataloader
store_labels = False
imgs, _, _, masks = batch
# imgs currently channel first, [0-1] range, floats
imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
masks = masks.cpu().numpy().astype(np.bool)
curr_batch_sz, h, w, c = imgs.shape
assert (h == archetype_config.input_sz and w == archetype_config.input_sz
and c == archetype_config.in_channels)
if b_i < num_batches - 1:
assert (batch_sz == curr_batch_sz)
start = b_i * batch_sz
for i in range(curr_batch_sz):
grey_img = cv2.cvtColor(imgs[i, :, :, :], cv2.COLOR_RGB2GRAY)
locs, descs = vlfeat.vl_dsift(grey_img, step=SIFT_STEP)
descs = descs.transpose((1, 0)) # 40*40, 128
descs = descs.reshape(-1, SIFT_DLEN) # rows change slowest
# get the corresponding box central mask/label
mask = masks[i][central_inds_h, central_inds_w]
offset = start + i
descs_all[offset, :, :] = descs
masks_all[offset, :] = mask
if store_labels:
label = labels[i][central_inds_h, central_inds_w]
labels_all[offset, :] = label
actual_num_imgs += curr_batch_sz
descs_all = descs_all[:actual_num_imgs, :, :]
masks_all = masks_all[:actual_num_imgs, :]
num_unmasked = masks_all.sum()
if store_labels:
labels_all = labels_all[:actual_num_imgs, :]
samples_labels = labels_all[masks_all].reshape(-1)
assert (samples_labels.shape[0] == num_unmasked)
samples = descs_all[masks_all, :].reshape(-1, SIFT_DLEN)
assert (samples.shape[0] == num_unmasked)
if not store_labels:
return samples
else:
return samples, samples_labels
def _get_vectorised_colour_samples(archetype_config, dataloader):
num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz
num_imgs_max = num_batches * batch_sz # estimate
img_sz = archetype_config.input_sz
# cluster individual pixels
imgs_all = np.zeros(
(num_imgs_max, img_sz, img_sz, archetype_config.in_channels),
dtype=np.uint8)
masks_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.bool)
labels_all = None
actual_num_imgs = 0
for b_i, batch in enumerate(dataloader):
if len(batch) == 3:
store_labels = True
if (labels_all is None):
labels_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.int32)
imgs, labels, masks = batch
labels = labels.cpu().numpy().astype(np.int32)
else:
store_labels = False
imgs, _, _, masks = batch
# channels last
imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
masks = masks.cpu().numpy().astype(np.bool)
curr_batch_sz, h, w, c = imgs.shape
assert (h == archetype_config.input_sz and w == archetype_config.input_sz
and c == archetype_config.in_channels)
if b_i < num_batches - 1:
assert (batch_sz == curr_batch_sz)
start = b_i * batch_sz
imgs_all[start:(start + curr_batch_sz), :, :, :] = imgs
masks_all[start:(start + curr_batch_sz), :, :] = masks
if store_labels:
labels_all[start:(start + curr_batch_sz), :, :] = labels
actual_num_imgs += curr_batch_sz
imgs_all = imgs_all[:actual_num_imgs, :, :, :]
masks_all = masks_all[:actual_num_imgs, :, :]
num_unmasked = masks_all.sum()
if store_labels:
labels_all = labels_all[:actual_num_imgs, :, :]
samples_labels = labels_all[masks_all].reshape(-1)
assert (samples_labels.shape[0] == num_unmasked)
samples = imgs_all[masks_all, :].reshape(-1, archetype_config.in_channels)
assert (samples.shape[0] == num_unmasked)
if not store_labels:
return samples
else:
return samples, samples_labels
def main():
# based on segmentation_multioutput_twohead - we pass in the config of the
# IID run we are comparing against, so the settings can be copied
parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--out_root", type=str,
default="/scratch/shared/slow/xuji/iid_private")
parser.add_argument("--IID_model_ind", type=int, required=True)
parser.add_argument("--max_num_train", type=int, required=True)
parser.add_argument("--test_code", default=False, action="store_true")
parser.add_argument("--do_sift", default=False, action="store_true")
config = parser.parse_args()
config.out_dir = os.path.join(config.out_root, str(config.model_ind))
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
archetype_config_path = os.path.join(config.out_root,
str(config.IID_model_ind),
"config.pickle")
print("Loading archetype config from: %s" % archetype_config_path)
with open(archetype_config_path, "rb") as config_f:
archetype_config = pickle.load(config_f)
assert (config.IID_model_ind == archetype_config.model_ind)
assert (archetype_config.mode == "IID") # compare against fully unsup
sample_fn = _get_vectorised_colour_samples
if config.do_sift:
sample_fn = _get_vectorised_sift_samples
# set it to be only rgb (and ir if nec) but no sobel - we're clustering
# single pixel colours
archetype_config.include_rgb = True
archetype_config.no_sobel = True
if "Coco" in archetype_config.dataset:
assert (not archetype_config.using_IR)
archetype_config.in_channels = 3
elif archetype_config.dataset == "Potsdam": # IR
assert (archetype_config.using_IR)
archetype_config.in_channels = 4
# Data
# -------------------------------------------------------------------------
if "Coco" in archetype_config.dataset:
dataloaders_head_A, mapping_assignment_dataloader, \
mapping_test_dataloader = \
make_Coco_dataloaders(archetype_config)
elif archetype_config.dataset == "Potsdam":
dataloaders_head_A, mapping_assignment_dataloader, \
mapping_test_dataloader = \
make_Potsdam_dataloaders(archetype_config)
else:
raise NotImplementedError
# unlike in clustering script for STL - isn't any data from unknown classes
dataloaders_head_B = dataloaders_head_A
# networks and optimisers
# ------------------------------------------------------
assert (archetype_config.num_dataloaders == 1)
dataloader = dataloaders_head_B[0]
samples = sample_fn(archetype_config, dataloader)
print("got training samples")
sys.stdout.flush()
if config.test_code:
print("testing code, taking 10000 samples only")
samples = samples[:10000, :]
else:
num_samples_train = min(samples.shape[0], config.max_num_train)
print("taking %d samples" % num_samples_train)
chosen_inds = np.random.choice(samples.shape[0], size=num_samples_train,
replace=False)
samples = samples[chosen_inds, :]
print(samples.shape)
sys.stdout.flush()
kmeans = MiniBatchKMeans(n_clusters=archetype_config.gt_k, verbose=1).fit(
samples)
print("trained kmeans")
sys.stdout.flush()
# use mapping assign to assign output_k=gt_k to gt_k
# and also assess on its predictions, since it's identical to
# mapping_test_dataloader
assign_samples, assign_labels = sample_fn(archetype_config,
mapping_assignment_dataloader)
num_samples = assign_samples.shape[0]
assign_preds = kmeans.predict(assign_samples)
print("finished prediction for mapping assign/test data")
sys.stdout.flush()
assign_preds = torch.from_numpy(assign_preds).cuda()
assign_labels = torch.from_numpy(assign_labels).cuda()
if archetype_config.eval_mode == "hung":
match = _hungarian_match(assign_preds, assign_labels,
preds_k=archetype_config.gt_k,
targets_k=archetype_config.gt_k)
elif archetype_config.eval_mode == "orig": # flat!
match = _original_match(assign_preds, assign_labels,
preds_k=archetype_config.gt_k,
targets_k=archetype_config.gt_k)
elif archetype_config.eval_mode == "orig_soft":
assert (False) # not used
# reorder predictions to be same cluster assignments as gt_k
found = torch.zeros(archetype_config.gt_k)
reordered_preds = torch.zeros(num_samples).to(torch.int32).cuda()
for pred_i, target_i in match:
reordered_preds[assign_preds == pred_i] = target_i
found[pred_i] = 1
assert (found.sum() == archetype_config.gt_k) # each output_k must get mapped
acc = _acc(reordered_preds, assign_labels, archetype_config.gt_k)
print("got acc %f" % acc)
config.epoch_acc = [acc]
config.centroids = kmeans.cluster_centers_
config.match = match
# write results and centroids to model_ind output file
with open(os.path.join(config.out_dir, "config.pickle"), "w") as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "config.txt"), "w") as text_file:
text_file.write("%s" % config)
if __name__ == "__main__":
main()
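# A self-contained sketch of the cluster-reordering step inside main():
# each predicted cluster id is rewritten as its matched ground-truth class.
# The (pred_cluster, gt_class) match pairs below are hypothetical, chosen
# only for illustration.
def _reorder_sketch():
    preds = torch.tensor([0, 1, 2, 0], dtype=torch.int32)
    match = [(0, 2), (1, 0), (2, 1)]
    reordered = torch.zeros_like(preds)
    for pred_i, target_i in match:
        reordered[preds == pred_i] = target_i
    assert reordered.tolist() == [2, 0, 1, 2]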
| [
"torch.zeros",
"torch.from_numpy"
] | 0.4.1 | THinnerichs/MiS-Information-Clustering | 597c70e1283222e0e841e24f6805b967aaf3c9e0 |
1.7 | from copy import copy
from typing import Optional
import torch
import pytorch_lightning as pl
from transformers import (
EncoderDecoderModel,
RobertaModel,
RobertaConfig,
GPT2LMHeadModel,
GPT2Config,
RobertaTokenizer,
GPT2Tokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import nltk
nltk.download("wordnet")
class EncoderDecoderModule(pl.LightningModule):
def __init__(
self,
learning_rate: float,
src_tokenizer: RobertaTokenizer,
trg_tokenizer: GPT2Tokenizer,
num_epochs: int,
num_batches: int,
num_gpus: int,
num_layers_encoder: Optional[int] = None,
num_layers_decoder: Optional[int] = None,
encoder_name_or_path: Optional[str] = None,
decoder_name_or_path: Optional[str] = None,
**kwargs,
):
super().__init__()
self._src_tokenizer = src_tokenizer
self._trg_tokenizer = trg_tokenizer
self._num_epochs = num_epochs
self._num_batches = num_batches
self._num_gpus = num_gpus
self.learning_rate = learning_rate
self.save_hyperparameters()
if encoder_name_or_path is not None and decoder_name_or_path is not None:
# use pretrained RoBERTa as encoder
encoder = RobertaModel.from_pretrained(encoder_name_or_path)
# resize embeddings to match vocabulary size
encoder.resize_token_embeddings(len(self._src_tokenizer))
# remove layers if necessary
if num_layers_encoder is not None and num_layers_encoder < encoder.config.num_hidden_layers:
encoder = EncoderDecoderModule.remove_layers_from_model(encoder, num_layers_encoder, is_gpt=False)
# use pretrained GPT-2 as decoder
config = GPT2Config.from_pretrained(decoder_name_or_path)
config.is_decoder = True
config.add_cross_attention = True
decoder = GPT2LMHeadModel.from_pretrained(decoder_name_or_path, config=config)
# remove layers if necessary
if num_layers_decoder is not None and num_layers_decoder < decoder.config.n_layer:
decoder = EncoderDecoderModule.remove_layers_from_model(decoder, num_layers_decoder, is_gpt=True)
elif num_layers_decoder is not None and num_layers_encoder is not None:
# use randomly initialized RoBERTa as encoder
encoder_config = RobertaConfig()
encoder_config.num_hidden_layers = num_layers_encoder
encoder = RobertaModel(config=encoder_config)
# resize embeddings to match vocabulary size
encoder.resize_token_embeddings(len(self._src_tokenizer))
# use randomly initialized GPT-2 as decoder
decoder_config = GPT2Config()
decoder_config.n_layer = num_layers_decoder
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
decoder = GPT2LMHeadModel(config=decoder_config)
else:
raise ValueError(
"You have to specify either num_layers for training from scratch \
or paths for loading pretrained models"
)
self.model = EncoderDecoderModel(encoder=encoder, decoder=decoder)
# cache is currently not supported by EncoderDecoder framework
self.model.decoder.config.use_cache = False
# do not tie output embeddings to input embeddings
self.model.config.tie_word_embeddings = False
# to make logs for different batch sizes prettier
self.examples_count = 0
def forward(self, batch):
return self.model(
input_ids=batch["diff_input_ids"],
attention_mask=batch["diff_attention_mask"],
decoder_input_ids=batch["msg_input_ids"],
decoder_attention_mask=batch["msg_attention_mask"],
labels=batch["msg_labels"],
)
def training_step(self, batch, batch_idx):
self.examples_count += len(batch["diff_input_ids"])
loss, logits = self(batch)[:2]
self.logger.experiment.log({"train_loss_step": loss}, step=self.examples_count)
return {"loss": loss}
def training_epoch_end(self, outputs):
train_loss_mean = torch.stack([x["loss"] for x in outputs]).mean()
self.logger.experiment.log({"train_loss_epoch": train_loss_mean}, step=self.examples_count)
def next_token_metrics_step(self, batch):
loss, scores = self(batch)[:2]
return {"loss": loss}
def next_token_metrics_epoch_end(self, outputs, stage):
"""
Logic for validation & testing epoch end:
1) Calculate accuracy@1, accuracy@5, MRR@5
2) (in val stage only) Aggregate loss and log metric(s) for ModelCheckpoint
3) Log everything to wandb
"""
loss = torch.stack([x["loss"] for x in outputs]).mean()
metrics = {f"{stage}_loss_epoch": loss}
if stage == "val":
self.log("val_loss_epoch", metrics["val_loss_epoch"], on_step=False, on_epoch=True, prog_bar=True, logger=False)
self.logger.experiment.log(metrics, step=self.examples_count)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return self.next_token_metrics_step(batch)
def validation_epoch_end(self, outputs):
self.next_token_metrics_epoch_end(outputs, stage="val")
def test_step(self, batch, batch_idx):
return self.next_token_metrics_step(batch)
def test_epoch_end(self, outputs):
self.next_token_metrics_epoch_end(outputs, stage="test")
def configure_optimizers(self):
optimizer = AdamW(self.parameters(), lr=self.learning_rate)
scheduler = {
"scheduler": get_linear_schedule_with_warmup(
optimizer, 4000 // self._num_gpus, self._num_epochs * self._num_batches
),
"interval": "step",
"frequency": 1,
}
return [optimizer], [scheduler]
@staticmethod
def remove_layers_from_model(teacher, num_layers, is_gpt):
if not is_gpt:
teacher_config = teacher.config
student_config = copy(teacher.config)
student_config.num_hidden_layers = num_layers
student = RobertaModel(config=student_config)
# copy all embeddings
student.embeddings.word_embeddings = teacher.embeddings.word_embeddings
student.embeddings.position_embeddings = teacher.embeddings.position_embeddings
student.embeddings.token_type_embeddings = teacher.embeddings.token_type_embeddings
student.embeddings.LayerNorm = teacher.embeddings.LayerNorm
student.embeddings.dropout = teacher.embeddings.dropout
# uniformly pick evenly-spaced layers from the teacher
# equivalent to np.linspace(0, teacher_config.num_hidden_layers - 1,
# num=student_config.num_hidden_layers, endpoint=True)
step = (teacher_config.num_hidden_layers - 1) / (student_config.num_hidden_layers - 1)
for student_layer, teacher_layer in enumerate(
int(i * step) for i in range(student_config.num_hidden_layers)
):
student.encoder.layer[student_layer] = teacher.encoder.layer[teacher_layer]
else:
teacher_config = teacher.config
student_config = copy(teacher.config)
student_config.n_layer = num_layers
student = GPT2LMHeadModel(config=student_config)
# Copying all embeddings
student.transformer.wte = teacher.transformer.wte
student.transformer.wpe = teacher.transformer.wpe
student.transformer.drop = teacher.transformer.drop
# Maybe there is something else in BERT that needs to be copied!
# Specific thing for GPT2LMHead. Not necessary for BERT
student.tie_weights()
# Uniformly pick evenly-spaced layers from the teacher
# Equivalent to np.linspace(0, teacher_config.n_layer - 1, num=student_config.n_layer, endpoint=True)
step = (teacher_config.n_layer - 1) / (student_config.n_layer - 1)
for student_layer, teacher_layer in enumerate(int(i * step) for i in range(student_config.n_layer)):
student.transformer.h[student_layer] = teacher.transformer.h[teacher_layer]
return student
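# A worked example of the uniform layer selection above (an illustrative
# helper, not part of the module): distilling a 12-layer teacher into a
# 4-layer student keeps teacher layers [0, 3, 7, 11].
def _selected_layers(teacher_layers, student_layers):
    step = (teacher_layers - 1) / (student_layers - 1)
    return [int(i * step) for i in range(student_layers)]

assert _selected_layers(12, 4) == [0, 3, 7, 11]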
| [
"torch.stack"
] | 1.7.0 | saridormi/commit_message_generation | c25db61a5f41accfb566caaea5feb0d275751293 |
1.7 | # -*- coding: utf-8 -*-
import torch
from supar.utils.common import MIN
from supar.utils.fn import pad
from torch.autograd import Function
def tarjan(sequence):
r"""
Tarjan algorithm for finding Strongly Connected Components (SCCs) of a graph.
Args:
sequence (list):
List of head indices.
Yields:
A list of indices making up a SCC. All self-loops are ignored.
Examples:
>>> next(tarjan([2, 5, 0, 3, 1])) # (1 -> 5 -> 2 -> 1) is a cycle
[2, 5, 1]
"""
sequence = [-1] + sequence
# record the search order, i.e., the timestep
dfn = [-1] * len(sequence)
# record the smallest timestep in a SCC
low = [-1] * len(sequence)
# push the visited into the stack
stack, onstack = [], [False] * len(sequence)
def connect(i, timestep):
dfn[i] = low[i] = timestep[0]
timestep[0] += 1
stack.append(i)
onstack[i] = True
for j, head in enumerate(sequence):
if head != i:
continue
if dfn[j] == -1:
yield from connect(j, timestep)
low[i] = min(low[i], low[j])
elif onstack[j]:
low[i] = min(low[i], dfn[j])
# a SCC is completed
if low[i] == dfn[i]:
cycle = [stack.pop()]
while cycle[-1] != i:
onstack[cycle[-1]] = False
cycle.append(stack.pop())
onstack[i] = False
# ignore the self-loop
if len(cycle) > 1:
yield cycle
timestep = [0]
for i in range(len(sequence)):
if dfn[i] == -1:
yield from connect(i, timestep)
def chuliu_edmonds(s):
r"""
ChuLiu/Edmonds algorithm for non-projective decoding :cite:`mcdonald-etal-2005-non`.
Some code is borrowed from `tdozat's implementation`_.
Descriptions of notations and formulas can be found in :cite:`mcdonald-etal-2005-non`.
Notes:
The algorithm is not guaranteed to produce a single-root tree.
Args:
s (~torch.Tensor): ``[seq_len, seq_len]``.
Scores of all dependent-head pairs.
Returns:
~torch.Tensor:
A tensor with shape ``[seq_len]`` for the resulting non-projective parse tree.
.. _tdozat's implementation:
https://github.com/tdozat/Parser-v3
"""
s[0, 1:] = MIN
# prevent self-loops
s.diagonal()[1:].fill_(MIN)
# select heads with highest scores
tree = s.argmax(-1)
# lazily return the first cycle found by the tarjan algorithm
cycle = next(tarjan(tree.tolist()[1:]), None)
# if the tree has no cycles, then it is a MST
if not cycle:
return tree
# indices of cycle in the original tree
cycle = torch.tensor(cycle)
# indices of noncycle in the original tree
noncycle = torch.ones(len(s)).index_fill_(0, cycle, 0)
noncycle = torch.where(noncycle.gt(0))[0]
def contract(s):
# heads of cycle in original tree
cycle_heads = tree[cycle]
# scores of cycle in original tree
s_cycle = s[cycle, cycle_heads]
# calculate the scores of cycle's potential dependents
# s(c->x) = max(s(x'->x)), x in noncycle and x' in cycle
s_dep = s[noncycle][:, cycle]
# find the best cycle head for each noncycle dependent
deps = s_dep.argmax(1)
# calculate the scores of cycle's potential heads
# s(x->c) = max(s(x'->x) - s(a(x')->x') + s(cycle)), x in noncycle and x' in cycle
# a(v) is the predecessor of v in cycle
# s(cycle) = sum(s(a(v)->v))
s_head = s[cycle][:, noncycle] - s_cycle.view(-1, 1) + s_cycle.sum()
# find the best noncycle head for each cycle dependent
heads = s_head.argmax(0)
contracted = torch.cat((noncycle, torch.tensor([-1])))
# calculate the scores of contracted graph
s = s[contracted][:, contracted]
# set the contracted graph scores of cycle's potential dependents
s[:-1, -1] = s_dep[range(len(deps)), deps]
# set the contracted graph scores of cycle's potential heads
s[-1, :-1] = s_head[heads, range(len(heads))]
return s, heads, deps
# keep track of the endpoints of the edges into and out of cycle for reconstruction later
s, heads, deps = contract(s)
# y is the contracted tree
y = chuliu_edmonds(s)
# exclude head of cycle from y
y, cycle_head = y[:-1], y[-1]
# fix the subtree with no heads coming from the cycle
# len(y) denotes heads coming from the cycle
subtree = y < len(y)
# add the nodes to the new tree
tree[noncycle[subtree]] = noncycle[y[subtree]]
# fix the subtree with heads coming from the cycle
subtree = ~subtree
# add the nodes to the tree
tree[noncycle[subtree]] = cycle[deps[subtree]]
# fix the root of the cycle
cycle_root = heads[cycle_head]
# break the cycle and add the root of the cycle to the tree
tree[cycle[cycle_root]] = noncycle[cycle_head]
return tree
def mst(scores, mask, multiroot=False):
r"""
MST algorithm for decoding non-projective trees.
This is a wrapper for ChuLiu/Edmonds algorithm.
The algorithm first runs ChuLiu/Edmonds to parse a tree and then checks for multiple roots.
If ``multiroot=False`` and multiple roots are indeed found, the algorithm seeks the
best single-root tree by iterating over all possible single-root trees parsed by ChuLiu/Edmonds.
Otherwise the resulting trees are directly taken as the final outputs.
Args:
scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
Scores of all dependent-head pairs.
mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
The mask to avoid parsing over padding tokens.
The first column serving as pseudo words for roots should be ``False``.
multiroot (bool):
If ``False``, ensures to output a single-root tree.
Returns:
~torch.Tensor:
A tensor with shape ``[batch_size, seq_len]`` for the resulting non-projective parse trees.
Examples:
>>> scores = torch.tensor([[[-11.9436, -13.1464, -6.4789, -13.8917],
[-60.6957, -60.2866, -48.6457, -63.8125],
[-38.1747, -49.9296, -45.2733, -49.5571],
[-19.7504, -23.9066, -9.9139, -16.2088]]])
>>> scores[:, 0, 1:] = MIN
>>> scores.diagonal(0, 1, 2)[1:].fill_(MIN)
>>> mask = torch.tensor([[False, True, True, True]])
>>> mst(scores, mask)
tensor([[0, 2, 0, 2]])
"""
batch_size, seq_len, _ = scores.shape
scores = scores.cpu().unbind()
preds = []
for i, length in enumerate(mask.sum(1).tolist()):
s = scores[i][:length+1, :length+1]
tree = chuliu_edmonds(s)
roots = torch.where(tree[1:].eq(0))[0] + 1
if not multiroot and len(roots) > 1:
s_root = s[:, 0]
s_best = MIN
s = s.index_fill(1, torch.tensor(0), MIN)
for root in roots:
s[:, 0] = MIN
s[root, 0] = s_root[root]
t = chuliu_edmonds(s)
s_tree = s[1:].gather(1, t[1:].unsqueeze(-1)).sum()
if s_tree > s_best:
s_best, tree = s_tree, t
preds.append(tree)
return pad(preds, total_length=seq_len).to(mask.device)
class SampledLogsumexp(Function):
@staticmethod
def forward(ctx, x, dim=-1):
ctx.dim = dim
ctx.save_for_backward(x)
return x.logsumexp(dim=dim)
@staticmethod
def backward(ctx, grad_output):
from torch.distributions import OneHotCategorical
(x,), dim = ctx.saved_tensors, ctx.dim  # saved_tensors is a 1-tuple; unpack it
if ctx.needs_input_grad[0]:
return grad_output.unsqueeze(dim).mul(OneHotCategorical(logits=x.movedim(dim, -1)).sample().movedim(-1, dim)), None
return None, None
class Sparsemax(Function):
@staticmethod
def forward(ctx, x, dim=-1):
ctx.dim = dim
sorted_x, _ = x.sort(dim, True)
z = sorted_x.cumsum(dim) - 1
k = x.new_tensor(range(1, sorted_x.size(dim) + 1)).view(-1, *[1] * (x.dim() - 1)).transpose(0, dim)
k = (k * sorted_x).gt(z).sum(dim, True)
tau = z.gather(dim, k - 1) / k
p = torch.clamp(x - tau, 0)
ctx.save_for_backward(k, p)
return p
@staticmethod
def backward(ctx, grad_output):
k, p, dim = *ctx.saved_tensors, ctx.dim
grad = grad_output.masked_fill(p.eq(0), 0)
grad = torch.where(p.ne(0), grad - grad.sum(dim, True) / k, grad)
return grad, None
sampled_logsumexp = SampledLogsumexp.apply
sparsemax = Sparsemax.apply
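# A quick numeric check of sparsemax (illustrative): projecting the scores
# [1.0, 0.8, -1.0] onto the probability simplex zeroes out the smallest
# score and returns a distribution that sums to one.
if __name__ == "__main__":
    p = sparsemax(torch.tensor([1.0, 0.8, -1.0]))
    assert torch.allclose(p, torch.tensor([0.6, 0.4, 0.0]))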
| [
"torch.tensor",
"torch.clamp"
] | 1.7.1 | zysite/parser | 8ed9ccb8e542655fd6fd1b6f7faaf084d13a866e |
1.9 | # -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
from supar.models import (BiaffineDependencyModel, CRF2oDependencyModel,
CRFDependencyModel, VIDependencyModel)
from supar.parsers.parser import Parser
from supar.utils import Config, Dataset, Embedding
from supar.utils.common import BOS, PAD, UNK
from supar.utils.field import ChartField, Field, RawField, SubwordField
from supar.utils.fn import ispunct
from supar.utils.logging import get_logger, progress_bar
from supar.utils.metric import AttachmentMetric
from supar.utils.transform import CoNLL
logger = get_logger(__name__)
class BiaffineDependencyParser(Parser):
r"""
The implementation of Biaffine Dependency Parser :cite:`dozat-etal-2017-biaffine`.
"""
NAME = 'biaffine-dependency'
MODEL = BiaffineDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.TAG = self.transform.CPOS
self.ARC, self.REL = self.transform.HEAD, self.transform.DEPREL
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000,
punct=False, tree=True, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
data (str):
The data for evaluation, both list of instances and filename are allowed.
buckets (int):
The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
tree=True, proj=False, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
The data for prediction, both a list of instances and filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
buckets (int):
The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'biaffine-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
Default: None.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('biaffine-dep-en')
>>> parser = Parser.load('./ptb.biaffine.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.partial)
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.partial)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_rel = self.model(words, feats)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, s_arc.softmax(-1).unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
@classmethod
def build(cls, path, min_freq=2, fix_len=20, **kwargs):
r"""
Build a brand-new Parser, including initialization of all data fields and model parameters.
Args:
path (str):
The path of the model to be saved.
min_freq (str):
The minimum frequency needed to include a token in the vocabulary.
Required if taking words as encoder input.
Default: 2.
fix_len (int):
The max length of all subword pieces. The excess part of each piece will be truncated.
Required if using CharLSTM/BERT.
Default: 20.
kwargs (dict):
A dict holding the unconsumed arguments.
"""
args = Config(**locals())
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
os.makedirs(os.path.dirname(path) or './', exist_ok=True)
if os.path.exists(path) and not args.build:
parser = cls.load(**args)
parser.model = cls.MODEL(**parser.args)
parser.model.load_pretrained(parser.WORD.embed).to(args.device)
return parser
logger.info("Building the fields")
TAG, CHAR, ELMO, BERT = None, None, None, None
if args.encoder != 'lstm':
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
WORD = SubwordField('words',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
WORD.vocab = t.get_vocab()
else:
WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True)
if 'tag' in args.feat:
TAG = Field('tags', bos=BOS)
if 'char' in args.feat:
CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len)
if 'elmo' in args.feat:
from allennlp.modules.elmo import batch_to_ids
ELMO = RawField('elmo')
ELMO.compose = lambda x: batch_to_ids(x).to(WORD.device)
if 'bert' in args.feat:
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
BERT = SubwordField('bert',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
BERT.vocab = t.get_vocab()
TEXT = RawField('texts')
ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs)
REL = Field('rels', bos=BOS)
transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=ARC, DEPREL=REL)
train = Dataset(transform, args.train)
if args.encoder == 'lstm':
WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))
if TAG is not None:
TAG.build(train)
if CHAR is not None:
CHAR.build(train)
REL.build(train)
args.update({
'n_words': len(WORD.vocab) if args.encoder != 'lstm' else WORD.vocab.n_init,
'n_rels': len(REL.vocab),
'n_tags': len(TAG.vocab) if TAG is not None else None,
'n_chars': len(CHAR.vocab) if CHAR is not None else None,
'char_pad_index': CHAR.pad_index if CHAR is not None else None,
'bert_pad_index': BERT.pad_index if BERT is not None else None,
'pad_index': WORD.pad_index,
'unk_index': WORD.unk_index,
'bos_index': WORD.bos_index
})
logger.info(f"{transform}")
logger.info("Building the model")
model = cls.MODEL(**args).load_pretrained(WORD.embed if hasattr(WORD, 'embed') else None).to(args.device)
logger.info(f"{model}\n")
return cls(args, model, transform)
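# A self-contained sketch of the gradient-accumulation pattern used by
# _train above: the loss is divided by update_steps so the accumulated
# gradients average out, and the optimizer only steps every update_steps
# batches. All names below are generic stand-ins for illustration.
def _accumulation_sketch(update_steps=2, num_batches=4):
    model = nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for i in range(1, num_batches + 1):
        loss = model(torch.randn(8, 4)).pow(2).mean() / update_steps
        loss.backward()  # gradients accumulate in .grad across iterations
        if i % update_steps == 0:
            optimizer.step()
            optimizer.zero_grad()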
class CRFDependencyParser(BiaffineDependencyParser):
r"""
The implementation of first-order CRF Dependency Parser :cite:`zhang-etal-2020-efficient`.
"""
NAME = 'crf-dependency'
MODEL = CRFDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False,
mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):
r"""
Args:
data (str):
The data for evaluation, both list of instances and filename are allowed.
buckets (int):
The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
mbr=True, tree=True, proj=True, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
The data for prediction, both a list of instances and filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
buckets (int):
The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'crf-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
Default: None.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('crf-dep-en')
>>> parser = Parser.load('./ptb.crf.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.mbr, self.args.partial)
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.mbr, self.args.partial)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_rel = self.model(words, feats)
if self.args.mbr:
s_arc = self.model.crf(s_arc, mask, mbr=True)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
arc_probs = s_arc if self.args.mbr else s_arc.softmax(-1)
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, arc_probs.unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
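# A small sketch of the punctuation-masking trick used in the loops above:
# masked_scatter_ writes the per-token "keep" flags into the True slots of
# the padding mask, so punctuation is dropped from the evaluation metric.
# The tensors below are toy values for illustration.
def _punct_mask_sketch():
    mask = torch.tensor([[False, True, True, True]])  # bos + 3 real tokens
    keep = torch.tensor([True, False, True])          # token 2 is punctuation
    mask.masked_scatter_(mask, keep)
    assert mask.tolist() == [[False, True, False, True]]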
class CRF2oDependencyParser(BiaffineDependencyParser):
r"""
The implementation of second-order CRF Dependency Parser :cite:`zhang-etal-2020-efficient`.
"""
NAME = 'crf2o-dependency'
MODEL = CRF2oDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False,
mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):
r"""
Args:
data (str):
The data for evaluation, both list of instances and filename are allowed.
buckets (int):
The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
mbr=True, tree=True, proj=True, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
The data for prediction, both a list of instances and filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
buckets (int):
The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'crf2o-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
                Default: ``None``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('crf2o-dep-en')
>>> parser = Parser.load('./ptb.crf2o.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, sibs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.args.mbr, self.args.partial)
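            # scale the loss so that gradients accumulated over update_steps mini-batches average out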
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, sibs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.args.mbr, self.args.partial)
arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask, self.args.tree, self.args.mbr, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_sib, s_rel = self.model(words, feats)
if self.args.mbr:
s_arc = self.model.crf((s_arc, s_sib), mask, mbr=True)
arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask, self.args.tree, self.args.mbr, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
arc_probs = s_arc if self.args.mbr else s_arc.softmax(-1)
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, arc_probs.unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
@classmethod
def build(cls, path, min_freq=2, fix_len=20, **kwargs):
r"""
Build a brand-new Parser, including initialization of all data fields and model parameters.
Args:
path (str):
The path of the model to be saved.
            min_freq (int):
The minimum frequency needed to include a token in the vocabulary. Default: 2.
fix_len (int):
The max length of all subword pieces. The excess part of each piece will be truncated.
Required if using CharLSTM/BERT.
Default: 20.
kwargs (dict):
A dict holding the unconsumed arguments.
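        Examples:
            A minimal usage sketch; the paths are hypothetical and the class name is assumed
            from the ``'crf2o-dep-en'`` shortcut documented in :meth:`load`.
            >>> parser = CRF2oDependencyParser.build('exp/ptb.model', train='data/ptb/train.conllx')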
"""
args = Config(**locals())
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
os.makedirs(os.path.dirname(path) or './', exist_ok=True)
if os.path.exists(path) and not args.build:
parser = cls.load(**args)
parser.model = cls.MODEL(**parser.args)
parser.model.load_pretrained(parser.WORD.embed).to(args.device)
return parser
logger.info("Building the fields")
TAG, CHAR, ELMO, BERT = None, None, None, None
if args.encoder != 'lstm':
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
WORD = SubwordField('words',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
WORD.vocab = t.get_vocab()
else:
WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True)
if 'tag' in args.feat:
TAG = Field('tags', bos=BOS)
if 'char' in args.feat:
CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len)
if 'elmo' in args.feat:
from allennlp.modules.elmo import batch_to_ids
ELMO = RawField('elmo')
ELMO.compose = lambda x: batch_to_ids(x).to(WORD.device)
if 'bert' in args.feat:
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
BERT = SubwordField('bert',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
BERT.vocab = t.get_vocab()
TEXT = RawField('texts')
ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs)
SIB = ChartField('sibs', bos=BOS, use_vocab=False, fn=CoNLL.get_sibs)
REL = Field('rels', bos=BOS)
transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=(ARC, SIB), DEPREL=REL)
train = Dataset(transform, args.train)
if args.encoder == 'lstm':
WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))
if TAG is not None:
TAG.build(train)
if CHAR is not None:
CHAR.build(train)
REL.build(train)
args.update({
'n_words': len(WORD.vocab) if args.encoder != 'lstm' else WORD.vocab.n_init,
'n_rels': len(REL.vocab),
'n_tags': len(TAG.vocab) if TAG is not None else None,
'n_chars': len(CHAR.vocab) if CHAR is not None else None,
'char_pad_index': CHAR.pad_index if CHAR is not None else None,
'bert_pad_index': BERT.pad_index if BERT is not None else None,
'pad_index': WORD.pad_index,
'unk_index': WORD.unk_index,
'bos_index': WORD.bos_index
})
logger.info(f"{transform}")
logger.info("Building the model")
model = cls.MODEL(**args).load_pretrained(WORD.embed if hasattr(WORD, 'embed') else None).to(args.device)
logger.info(f"{model}\n")
return cls(args, model, transform)
class VIDependencyParser(BiaffineDependencyParser):
r"""
The implementation of Dependency Parser using Variational Inference (:cite:`wang-tu-2020-second`).
"""
NAME = 'vi-dependency'
MODEL = VIDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
tree (bool):
                If ``True``, ensures that the output trees are well-formed. Default: ``False``.
proj (bool):
                If ``True``, ensures that the output trees are projective. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False,
tree=True, proj=True, partial=False, verbose=True, **kwargs):
r"""
Args:
data (str):
                The data for evaluation. Both a list of instances and a filename are allowed.
buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
tree (bool):
                If ``True``, ensures that the output trees are well-formed. Default: ``True``.
proj (bool):
                If ``True``, ensures that the output trees are projective. Default: ``True``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
tree=True, proj=True, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
                The data for prediction. Both a list of instances and a filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
tree (bool):
                If ``True``, ensures that the output trees are well-formed. Default: ``True``.
proj (bool):
                If ``True``, ensures that the output trees are projective. Default: ``True``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
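        Examples:
            Illustrative only; assumes the pretrained ``'vi-dep-en'`` model and a hypothetical input file.
            >>> parser = Parser.load('vi-dep-en')
            >>> dataset = parser.predict('data/ptb/test.conllx', pred='pred.conllx')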
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'vi-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
                Default: ``None``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('vi-dep-en')
>>> parser = Parser.load('./ptb.vi.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, rels, mask)
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, rels, mask)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_sib, s_rel = self.model(words, feats)
s_arc = self.model.inference((s_arc, s_sib), mask)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, s_arc.unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
| [
"torch.no_grad",
"torch.cuda.is_available"
] | 1.9.0 | LiBinNLP/HOSDP | f0806d1c27c9d5233002836e1825a1567891d928 |
1.0 | import os
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import cv2
import config
from utils import Mesh
from models import CMR
from models.smpl_from_lib import SMPL
from utils.pose_utils import compute_similarity_transform_batch, \
scale_and_translation_transform_batch
from utils.cam_utils import orthographic_project_torch, undo_keypoint_normalisation
from datasets.my_3dpw_eval_dataset import PW3DEvalDataset
def evaluate_3dpw(model,
eval_dataset,
metrics,
device,
vis_save_path,
num_workers=4,
pin_memory=True,
vis_every_n_batches=1000):
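    """
    Evaluate a CMR model on the 3DPW test set.
    For each requested metric, this computes per-vertex or per-joint Euclidean errors
    (optionally after scale-and-translation or Procrustes alignment), accumulates
    running sums, saves per-frame metric arrays to disk and visualises one example
    every `vis_every_n_batches` batches.
    Note: the visualisation branch reuses aligned predictions computed for the
    'pve_pa', 'pve-t_scale_corrected' and 'j3d_rec_err' metrics, so those metrics
    should be enabled whenever visualisation is on.
    """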
eval_dataloader = DataLoader(eval_dataset,
batch_size=1,
shuffle=False,
drop_last=True,
num_workers=num_workers,
pin_memory=pin_memory)
smpl = SMPL(config.SMPL_MODEL_DIR, batch_size=1)
smpl_male = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='male')
smpl_female = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='female')
smpl.to(device)
smpl_male.to(device)
smpl_female.to(device)
J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
J_regressor_batch = J_regressor[None, :].to(device)
if 'pve' in metrics:
pve_smpl_sum = 0.0
pve_graph_sum = 0.0
pve_smpl_per_frame = []
pve_graph_per_frame = []
if 'pve_scale_corrected' in metrics:
pve_scale_corrected_smpl_sum = 0.0
pve_scale_corrected_graph_sum = 0.0
pve_scale_corrected_smpl_per_frame = []
pve_scale_corrected_graph_per_frame = []
if 'pve_pa' in metrics:
pve_pa_smpl_sum = 0.0
pve_pa_graph_sum = 0.0
pve_pa_smpl_per_frame = []
pve_pa_graph_per_frame = []
if 'pve-t' in metrics:
pvet_sum = 0.0
pvet_per_frame = []
if 'pve-t_scale_corrected' in metrics:
pvet_scale_corrected_sum = 0.0
pvet_scale_corrected_per_frame = []
if 'mpjpe' in metrics:
mpjpe_smpl_sum = 0.0
mpjpe_graph_sum = 0.0
mpjpe_smpl_per_frame = []
mpjpe_graph_per_frame = []
if 'mpjpe_scale_corrected' in metrics:
mpjpe_scale_corrected_smpl_sum = 0.0
mpjpe_scale_corrected_graph_sum = 0.0
mpjpe_scale_corrected_smpl_per_frame = []
mpjpe_scale_corrected_graph_per_frame = []
if 'j3d_rec_err' in metrics:
j3d_rec_err_smpl_sum = 0.0
j3d_rec_err_graph_sum = 0.0
j3d_rec_err_smpl_per_frame = []
j3d_rec_err_graph_per_frame = []
if 'pve_2d' in metrics:
pve_2d_smpl_sum = 0.0
pve_2d_graph_sum = 0.0
if 'pve_2d_scale_corrected' in metrics:
pve_2d_scale_corrected_smpl_sum = 0.0
pve_2d_scale_corrected_graph_sum = 0.0
if 'pve_2d_pa' in metrics:
pve_2d_pa_smpl_sum = 0.0
pve_2d_pa_graph_sum = 0.0
num_samples = 0
num_vertices = 6890
num_joints3d = 14
model.eval()
for batch_num, samples_batch in enumerate(tqdm(eval_dataloader)):
# ------------------------------- TARGETS and INPUTS -------------------------------
input = samples_batch['input']
input = input.to(device)
target_pose = samples_batch['pose'].to(device)
target_shape = samples_batch['shape'].to(device)
target_gender = samples_batch['gender'][0]
if target_gender == 'm':
target_smpl_output = smpl_male(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_vertices = target_smpl_output.vertices
target_reposed_smpl_output = smpl_male(betas=target_shape)
target_reposed_vertices = target_reposed_smpl_output.vertices
target_joints_h36m = torch.matmul(J_regressor_batch, target_vertices)
target_joints_h36mlsp = target_joints_h36m[:, config.H36M_TO_J14, :]
elif target_gender == 'f':
target_smpl_output = smpl_female(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_vertices = target_smpl_output.vertices
target_reposed_smpl_output = smpl_female(betas=target_shape)
target_reposed_vertices = target_reposed_smpl_output.vertices
target_joints_h36m = torch.matmul(J_regressor_batch, target_vertices)
target_joints_h36mlsp = target_joints_h36m[:, config.H36M_TO_J14, :]
# ------------------------------- PREDICTIONS -------------------------------
pred_vertices, pred_vertices_smpl, pred_camera, pred_rotmat, pred_betas = model(input)
pred_vertices_projected2d = orthographic_project_torch(pred_vertices, pred_camera)
pred_vertices_projected2d = undo_keypoint_normalisation(pred_vertices_projected2d, input.shape[-1])
pred_vertices_smpl_projected2d = orthographic_project_torch(pred_vertices_smpl, pred_camera)
pred_vertices_smpl_projected2d = undo_keypoint_normalisation(pred_vertices_smpl_projected2d, input.shape[-1])
pred_reposed_smpl_output = smpl(betas=pred_betas)
pred_reposed_vertices = pred_reposed_smpl_output.vertices
pred_joints_h36m = torch.matmul(J_regressor_batch, pred_vertices)
pred_joints_h36mlsp = pred_joints_h36m[:, config.H36M_TO_J14, :]
pred_joints_smpl_h36m = torch.matmul(J_regressor_batch, pred_vertices_smpl)
pred_joints_smpl_h36mlsp = pred_joints_smpl_h36m[:, config.H36M_TO_J14, :]
# Numpy-fying
target_vertices = target_vertices.cpu().detach().numpy()
target_reposed_vertices = target_reposed_vertices.cpu().detach().numpy()
target_joints_h36mlsp = target_joints_h36mlsp.cpu().detach().numpy()
pred_vertices = pred_vertices.cpu().detach().numpy()
pred_vertices_smpl = pred_vertices_smpl.cpu().detach().numpy()
pred_vertices_projected2d = pred_vertices_projected2d.cpu().detach().numpy()
pred_vertices_smpl_projected2d = pred_vertices_smpl_projected2d.cpu().detach().numpy()
pred_reposed_vertices = pred_reposed_vertices.cpu().detach().numpy()
pred_joints_h36mlsp = pred_joints_h36mlsp.cpu().detach().numpy()
pred_joints_smpl_h36mlsp = pred_joints_smpl_h36mlsp.cpu().detach().numpy()
# ------------------------------- METRICS -------------------------------
if 'pve' in metrics:
pve_smpl_batch = np.linalg.norm(pred_vertices_smpl - target_vertices, axis=-1) # (1, 6890)
pve_graph_batch = np.linalg.norm(pred_vertices - target_vertices, axis=-1)
pve_smpl_sum += np.sum(pve_smpl_batch) # scalar
pve_graph_sum += np.sum(pve_graph_batch)
pve_smpl_per_frame.append(np.mean(pve_smpl_batch, axis=-1))
pve_graph_per_frame.append(np.mean(pve_graph_batch, axis=-1))
# Scale and translation correction
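        # (rigid alignment of predictions to targets using only scale and translation, no rotation)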
if 'pve_scale_corrected' in metrics:
pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl,
target_vertices)
pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices,
target_vertices)
pve_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_sc - target_vertices,
axis=-1) # (1, 6890)
pve_sc_graph_batch = np.linalg.norm(pred_vertices_sc - target_vertices,
axis=-1) # (1, 6890)
pve_scale_corrected_smpl_sum += np.sum(pve_sc_smpl_batch) # scalar
pve_scale_corrected_graph_sum += np.sum(pve_sc_graph_batch) # scalar
pve_scale_corrected_smpl_per_frame.append(np.mean(pve_sc_smpl_batch, axis=-1))
pve_scale_corrected_graph_per_frame.append(np.mean(pve_sc_graph_batch, axis=-1))
# Procrustes analysis
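        # (full similarity alignment: optimal rotation, scale and translation before measuring error)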
if 'pve_pa' in metrics:
pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices)
pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices)
pve_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_pa - target_vertices, axis=-1) # (1, 6890)
pve_pa_graph_batch = np.linalg.norm(pred_vertices_pa - target_vertices, axis=-1) # (1, 6890)
pve_pa_smpl_sum += np.sum(pve_pa_smpl_batch) # scalar
pve_pa_graph_sum += np.sum(pve_pa_graph_batch) # scalar
pve_pa_smpl_per_frame.append(np.mean(pve_pa_smpl_batch, axis=-1))
pve_pa_graph_per_frame.append(np.mean(pve_pa_graph_batch, axis=-1))
if 'pve-t' in metrics:
pvet_batch = np.linalg.norm(pred_reposed_vertices - target_reposed_vertices, axis=-1)
pvet_sum += np.sum(pvet_batch)
pvet_per_frame.append(np.mean(pvet_batch, axis=-1))
# Scale and translation correction
if 'pve-t_scale_corrected' in metrics:
pred_reposed_vertices_sc = scale_and_translation_transform_batch(pred_reposed_vertices,
target_reposed_vertices)
pvet_scale_corrected_batch = np.linalg.norm(pred_reposed_vertices_sc - target_reposed_vertices,
axis=-1) # (bs, 6890)
pvet_scale_corrected_sum += np.sum(pvet_scale_corrected_batch) # scalar
pvet_scale_corrected_per_frame.append(np.mean(pvet_scale_corrected_batch, axis=-1))
if 'mpjpe' in metrics:
mpjpe_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp - target_joints_h36mlsp, axis=-1) # (bs, 14)
mpjpe_graph_batch = np.linalg.norm(pred_joints_h36mlsp - target_joints_h36mlsp, axis=-1) # (bs, 14)
mpjpe_smpl_sum += np.sum(mpjpe_smpl_batch)
mpjpe_graph_sum += np.sum(mpjpe_graph_batch)
mpjpe_smpl_per_frame.append(np.mean(mpjpe_smpl_batch, axis=-1))
mpjpe_graph_per_frame.append(np.mean(mpjpe_graph_batch, axis=-1))
# Scale and translation correction
if 'mpjpe_scale_corrected' in metrics:
pred_joints_smpl_h36mlsp_sc = scale_and_translation_transform_batch(pred_joints_smpl_h36mlsp,
target_joints_h36mlsp)
pred_joints_h36mlsp_sc = scale_and_translation_transform_batch(pred_joints_h36mlsp,
target_joints_h36mlsp)
mpjpe_scale_corrected_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp_sc - target_joints_h36mlsp,
axis=-1) # (bs, 14)
mpjpe_scale_corrected_graph_batch = np.linalg.norm(pred_joints_h36mlsp_sc - target_joints_h36mlsp,
axis=-1) # (bs, 14)
mpjpe_scale_corrected_smpl_sum += np.sum(mpjpe_scale_corrected_smpl_batch)
mpjpe_scale_corrected_graph_sum += np.sum(mpjpe_scale_corrected_graph_batch)
mpjpe_scale_corrected_smpl_per_frame.append(np.mean(mpjpe_scale_corrected_smpl_batch, axis=-1))
mpjpe_scale_corrected_graph_per_frame.append(np.mean(mpjpe_scale_corrected_graph_batch, axis=-1))
# Procrustes analysis
if 'j3d_rec_err' in metrics:
pred_joints_smpl_h36mlsp_pa = compute_similarity_transform_batch(pred_joints_smpl_h36mlsp,
target_joints_h36mlsp)
pred_joints_h36mlsp_pa = compute_similarity_transform_batch(pred_joints_h36mlsp, target_joints_h36mlsp)
j3d_rec_err_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)
j3d_rec_err_graph_batch = np.linalg.norm(pred_joints_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)
j3d_rec_err_smpl_sum += np.sum(j3d_rec_err_smpl_batch)
j3d_rec_err_graph_sum += np.sum(j3d_rec_err_graph_batch)
j3d_rec_err_smpl_per_frame.append(np.mean(j3d_rec_err_smpl_batch, axis=-1))
j3d_rec_err_graph_per_frame.append(np.mean(j3d_rec_err_graph_batch, axis=-1))
if 'pve_2d' in metrics:
pred_vertices_smpl_2d = pred_vertices_smpl[:, :, :2]
pred_vertices_2d = pred_vertices[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_graph_batch = np.linalg.norm(pred_vertices_2d - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_smpl_sum += np.sum(pve_2d_smpl_batch)
pve_2d_graph_sum += np.sum(pve_2d_graph_batch)
# Scale and translation correction
if 'pve_2d_scale_corrected' in metrics:
pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl,
target_vertices)
pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices,
target_vertices)
pred_vertices_smpl_2d_sc = pred_vertices_smpl_sc[:, :, :2]
pred_vertices_2d_sc = pred_vertices_sc[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d_sc - target_vertices_2d,
axis=-1) # (bs, 6890)
pve_2d_sc_graph_batch = np.linalg.norm(pred_vertices_2d_sc - target_vertices_2d,
axis=-1) # (bs, 6890)
pve_2d_scale_corrected_smpl_sum += np.sum(pve_2d_sc_smpl_batch)
pve_2d_scale_corrected_graph_sum += np.sum(pve_2d_sc_graph_batch)
# Procrustes analysis
if 'pve_2d_pa' in metrics:
pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices)
pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices)
pred_vertices_smpl_2d_pa = pred_vertices_smpl_pa[:, :, :2]
pred_vertices_2d_pa = pred_vertices_pa[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d_pa - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_pa_graph_batch = np.linalg.norm(pred_vertices_2d_pa - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_pa_smpl_sum += np.sum(pve_2d_pa_smpl_batch)
pve_2d_pa_graph_sum += np.sum(pve_2d_pa_graph_batch)
num_samples += target_pose.shape[0]
# ------------------------------- VISUALISE -------------------------------
if vis_every_n_batches is not None:
if batch_num % vis_every_n_batches == 0:
vis_imgs = samples_batch['vis_img'].numpy()
vis_imgs = np.transpose(vis_imgs, [0, 2, 3, 1])
fnames = samples_batch['fname']
plt.figure(figsize=(16, 12))
plt.subplot(341)
plt.imshow(vis_imgs[0])
plt.subplot(342)
plt.imshow(vis_imgs[0])
plt.scatter(pred_vertices_projected2d[0, :, 0], pred_vertices_projected2d[0, :, 1], s=0.1, c='r')
plt.subplot(343)
plt.imshow(vis_imgs[0])
plt.scatter(pred_vertices_smpl_projected2d[0, :, 0], pred_vertices_smpl_projected2d[0, :, 1], s=0.1, c='r')
plt.subplot(345)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices[0, :, 0], pred_vertices[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(346)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_smpl[0, :, 0], pred_vertices_smpl[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(347)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_pa[0, :, 0], pred_vertices_pa[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(348)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_smpl_pa[0, :, 0], pred_vertices_smpl_pa[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(349)
plt.scatter(target_reposed_vertices[0, :, 0], target_reposed_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_reposed_vertices_sc[0, :, 0], pred_reposed_vertices_sc[0, :, 1], s=0.1, c='r')
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 10)
for j in range(num_joints3d):
plt.scatter(pred_joints_h36mlsp[0, j, 0], pred_joints_h36mlsp[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_h36mlsp[0, j, 0], pred_joints_h36mlsp[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 11)
for j in range(num_joints3d):
plt.scatter(pred_joints_h36mlsp_pa[0, j, 0], pred_joints_h36mlsp_pa[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_h36mlsp_pa[0, j, 0], pred_joints_h36mlsp_pa[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 12)
for j in range(num_joints3d):
plt.scatter(pred_joints_smpl_h36mlsp_pa[0, j, 0], pred_joints_smpl_h36mlsp_pa[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_smpl_h36mlsp_pa[0, j, 0], pred_joints_smpl_h36mlsp_pa[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
# plt.show()
save_fig_path = os.path.join(vis_save_path, fnames[0])
plt.savefig(save_fig_path, bbox_inches='tight')
plt.close()
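    # ------------------------------- AGGREGATE METRICS -------------------------------
    # each sum is divided by (num_samples * num elements) to yield mean per-vertex/per-joint errors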
if 'pve' in metrics:
pve_smpl = pve_smpl_sum / (num_samples * num_vertices)
print('PVE SMPL: {:.5f}'.format(pve_smpl))
pve_graph = pve_graph_sum / (num_samples * num_vertices)
print('PVE GRAPH: {:.5f}'.format(pve_graph))
pve_smpl_per_frame = np.concatenate(pve_smpl_per_frame, axis=0)
pve_graph_per_frame = np.concatenate(pve_graph_per_frame, axis=0)
        np.save(os.path.join(vis_save_path, 'pve_per_frame.npy'), pve_smpl_per_frame)
        np.save(os.path.join(vis_save_path, 'pve_graph_per_frame.npy'), pve_graph_per_frame)
if 'pve_scale_corrected' in metrics:
pve_sc_smpl = pve_scale_corrected_smpl_sum / (num_samples * num_vertices)
print('PVE SC SMPL: {:.5f}'.format(pve_sc_smpl))
pve_sc_graph = pve_scale_corrected_graph_sum / (num_samples * num_vertices)
print('PVE SC GRAPH: {:.5f}'.format(pve_sc_graph))
pve_scale_corrected_smpl_per_frame = np.concatenate(pve_scale_corrected_smpl_per_frame, axis=0)
pve_scale_corrected_graph_per_frame = np.concatenate(pve_scale_corrected_graph_per_frame, axis=0)
        np.save(os.path.join(vis_save_path, 'pve_scale_corrected_per_frame.npy'),
                pve_scale_corrected_smpl_per_frame)
        np.save(os.path.join(vis_save_path, 'pve_scale_corrected_graph_per_frame.npy'),
                pve_scale_corrected_graph_per_frame)
if 'pve_pa' in metrics:
pve_pa_smpl = pve_pa_smpl_sum / (num_samples * num_vertices)
print('PVE PA SMPL: {:.5f}'.format(pve_pa_smpl))
pve_pa_graph = pve_pa_graph_sum / (num_samples * num_vertices)
print('PVE PA GRAPH: {:.5f}'.format(pve_pa_graph))
pve_pa_smpl_per_frame = np.concatenate(pve_pa_smpl_per_frame, axis=0)
pve_pa_graph_per_frame = np.concatenate(pve_pa_graph_per_frame, axis=0)
        np.save(os.path.join(vis_save_path, 'pve_pa_per_frame.npy'), pve_pa_smpl_per_frame)
        np.save(os.path.join(vis_save_path, 'pve_pa_graph_per_frame.npy'), pve_pa_graph_per_frame)
if 'pve-t' in metrics:
pvet = pvet_sum / (num_samples * num_vertices)
print('PVE-T: {:.5f}'.format(pvet))
pvet_per_frame = np.concatenate(pvet_per_frame, axis=0)
        np.save(os.path.join(vis_save_path, 'pvet_per_frame.npy'), pvet_per_frame)
if 'pve-t_scale_corrected' in metrics:
pvet_sc = pvet_scale_corrected_sum / (num_samples * num_vertices)
print('PVE-T SC: {:.5f}'.format(pvet_sc))
pvet_scale_corrected_per_frame = np.concatenate(pvet_scale_corrected_per_frame, axis=0)
        np.save(os.path.join(vis_save_path, 'pvet_scale_corrected_per_frame.npy'),
                pvet_scale_corrected_per_frame)
if 'mpjpe' in metrics:
mpjpe_smpl = mpjpe_smpl_sum / (num_samples * num_joints3d)
print('MPJPE SMPL: {:.5f}'.format(mpjpe_smpl))
mpjpe_graph = mpjpe_graph_sum / (num_samples * num_joints3d)
print('MPJPE GRAPH: {:.5f}'.format(mpjpe_graph))
mpjpe_smpl_per_frame = np.concatenate(mpjpe_smpl_per_frame, axis=0)
mpjpe_graph_per_frame = np.concatenate(mpjpe_graph_per_frame, axis=0)
        np.save(os.path.join(vis_save_path, 'mpjpe_per_frame.npy'), mpjpe_smpl_per_frame)
        np.save(os.path.join(vis_save_path, 'mpjpe_graph_per_frame.npy'), mpjpe_graph_per_frame)
if 'mpjpe_scale_corrected' in metrics:
mpjpe_sc_smpl = mpjpe_scale_corrected_smpl_sum / (num_samples * num_joints3d)
print('MPJPE SC SMPL: {:.5f}'.format(mpjpe_sc_smpl))
mpjpe_sc_graph = mpjpe_scale_corrected_graph_sum / (num_samples * num_joints3d)
print('MPJPE SC GRAPH: {:.5f}'.format(mpjpe_sc_graph))
mpjpe_scale_corrected_smpl_per_frame = np.concatenate(
mpjpe_scale_corrected_smpl_per_frame, axis=0)
mpjpe_scale_corrected_graph_per_frame = np.concatenate(
mpjpe_scale_corrected_graph_per_frame, axis=0)
        np.save(os.path.join(vis_save_path, 'mpjpe_scale_corrected_per_frame.npy'),
                mpjpe_scale_corrected_smpl_per_frame)
        np.save(os.path.join(vis_save_path, 'mpjpe_scale_corrected_graph_per_frame.npy'),
                mpjpe_scale_corrected_graph_per_frame)
if 'j3d_rec_err' in metrics:
j3d_rec_err_smpl = j3d_rec_err_smpl_sum / (num_samples * num_joints3d)
print('Rec Err SMPL: {:.5f}'.format(j3d_rec_err_smpl))
j3d_rec_err_graph = j3d_rec_err_graph_sum / (num_samples * num_joints3d)
print('Rec Err GRAPH: {:.5f}'.format(j3d_rec_err_graph))
j3d_rec_err_smpl_per_frame = np.concatenate(j3d_rec_err_smpl_per_frame, axis=0)
j3d_rec_err_graph_per_frame = np.concatenate(j3d_rec_err_graph_per_frame, axis=0)
        np.save(os.path.join(vis_save_path, 'j3d_rec_err_per_frame.npy'),
                j3d_rec_err_smpl_per_frame)
        np.save(os.path.join(vis_save_path, 'j3d_rec_err_graph_per_frame.npy'),
                j3d_rec_err_graph_per_frame)
if 'pve_2d' in metrics:
pve_2d_smpl = pve_2d_smpl_sum / (num_samples * num_vertices)
print('PVE 2D SMPL: {:.5f}'.format(pve_2d_smpl))
pve_2d_graph = pve_2d_graph_sum / (num_samples * num_vertices)
print('PVE 2D GRAPH: {:.5f}'.format(pve_2d_graph))
if 'pve_2d_scale_corrected' in metrics:
pve_2d_sc_smpl = pve_2d_scale_corrected_smpl_sum / (num_samples * num_vertices)
print('PVE 2D SC SMPL: {:.5f}'.format(pve_2d_sc_smpl))
pve_2d_sc_graph = pve_2d_scale_corrected_graph_sum / (num_samples * num_vertices)
print('PVE 2D SC GRAPH: {:.5f}'.format(pve_2d_sc_graph))
if 'pve_2d_pa' in metrics:
pve_2d_pa_smpl = pve_2d_pa_smpl_sum / (num_samples * num_vertices)
print('PVE 2D PA SMPL: {:.5f}'.format(pve_2d_pa_smpl))
pve_2d_pa_graph = pve_2d_pa_graph_sum / (num_samples * num_vertices)
print('PVE 2D PA GRAPH: {:.5f}'.format(pve_2d_pa_graph))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default=None, help='Path to network checkpoint')
parser.add_argument('--gpu', default="0", type=str, help='GPU')
args = parser.parse_args()
# Device
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Load model
mesh = Mesh(device=device)
# Our pretrained networks have 5 residual blocks with 256 channels.
# You might want to change this if you use a different architecture.
model = CMR(mesh, 5, 256, pretrained_checkpoint=args.checkpoint, device=device)
model.to(device)
model.eval()
# Setup evaluation dataset
dataset_path = '/scratch2/as2562/datasets/3DPW/test'
dataset = PW3DEvalDataset(dataset_path, img_wh=config.INPUT_RES)
print("Eval examples found:", len(dataset))
# Metrics
metrics = ['pve', 'pve-t', 'pve_pa', 'pve-t_pa', 'mpjpe', 'j3d_rec_err',
'pve_2d', 'pve_2d_pa', 'pve_2d_scale_corrected',
'pve_scale_corrected', 'pve-t_scale_corrected', 'mpjpe_scale_corrected']
save_path = '/data/cvfs/as2562/GraphCMR/evaluations/3dpw'
if not os.path.exists(save_path):
os.makedirs(save_path)
# Run evaluation
evaluate_3dpw(model=model,
eval_dataset=dataset,
metrics=metrics,
device=device,
vis_save_path=save_path,
num_workers=4,
pin_memory=True,
vis_every_n_batches=1000)
| [
"torch.device",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.matmul"
] | 1.0.0 | akashsengupta1997/GraphCMR | 0b8b05be4f711995ba50e414effbde98b6b11c5b |
1.5 | import logging
from collections import OrderedDict
from pathlib import Path
from typing import List, Optional, Set, Tuple, Union
import numpy as np
import torch
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import minmax_scale
from tqdm import tqdm
import flair
from flair.data import Dictionary, Sentence, Span, SpanLabel
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import (
TokenEmbeddings,
TransformerDocumentEmbeddings,
TransformerWordEmbeddings,
)
from flair.file_utils import cached_path
from flair.models.sequence_tagger_model import SequenceTagger
from flair.models.text_classification_model import TextClassifier
from flair.training_utils import store_embeddings
log = logging.getLogger("flair")
class FewshotClassifier(flair.nn.Classifier[Sentence]):
def __init__(self):
self._current_task = None
self._task_specific_attributes = {}
self.label_nearest_map = None
self.tars_model: flair.nn.Classifier[Sentence]
super(FewshotClassifier, self).__init__()
def forward_loss(
self, data_points: Union[List[Sentence], Sentence]
) -> Union[torch.Tensor, Tuple[torch.Tensor, int]]:
if not isinstance(data_points, list):
data_points = [data_points]
# Transform input data into TARS format
sentences = self._get_tars_formatted_sentences(data_points)
loss = self.tars_model.forward_loss(sentences)
return loss
@property
def tars_embeddings(self):
raise NotImplementedError
def _get_tars_formatted_sentence(self, label, sentence):
raise NotImplementedError
def _get_tars_formatted_sentences(self, sentences: List[Sentence]):
label_text_pairs = []
all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
for sentence in sentences:
label_text_pairs_for_sentence = []
if self.training and self.num_negative_labels_to_sample is not None:
positive_labels = list(
OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)])
)
sampled_negative_labels = self._get_nearest_labels_for(positive_labels)
for label in positive_labels:
label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
for label in sampled_negative_labels:
label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
else:
for label in all_labels:
label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
label_text_pairs.extend(label_text_pairs_for_sentence)
return label_text_pairs
def _get_nearest_labels_for(self, labels):
# if there are no labels, return a random sample as negatives
if len(labels) == 0:
tags = self.get_current_label_dictionary().get_items()
import random
sample = random.sample(tags, k=self.num_negative_labels_to_sample)
return sample
already_sampled_negative_labels = set()
# otherwise, go through all labels
for label in labels:
plausible_labels = []
plausible_label_probabilities = []
for plausible_label in self.label_nearest_map[label]:
if plausible_label in already_sampled_negative_labels or plausible_label in labels:
continue
else:
plausible_labels.append(plausible_label)
plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label])
# make sure the probabilities always sum up to 1
plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64")
plausible_label_probabilities += 1e-08
plausible_label_probabilities /= np.sum(plausible_label_probabilities)
if len(plausible_labels) > 0:
num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels))
sampled_negative_labels = np.random.choice(
plausible_labels,
num_samples,
replace=False,
p=plausible_label_probabilities,
)
already_sampled_negative_labels.update(sampled_negative_labels)
return already_sampled_negative_labels
def train(self, mode=True):
"""Populate label similarity map based on cosine similarity before running epoch
If the `num_negative_labels_to_sample` is set to an integer value then before starting
each epoch the model would create a similarity measure between the label names based
on cosine distances between their BERT encoded embeddings.
"""
if mode and self.num_negative_labels_to_sample is not None:
self._compute_label_similarity_for_current_epoch()
        super().train(mode)
def _compute_label_similarity_for_current_epoch(self):
"""
Compute the similarity between all labels for better sampling of negatives
"""
# get and embed all labels by making a Sentence object that contains only the label text
all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
label_sentences = [Sentence(label) for label in all_labels]
self.tars_embeddings.eval() # TODO: check if this is necessary
self.tars_embeddings.embed(label_sentences)
self.tars_embeddings.train()
# get each label embedding and scale between 0 and 1
if isinstance(self.tars_embeddings, TokenEmbeddings):
encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences]
else:
encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences]
normalized_encoding = minmax_scale(encodings_np)
# compute similarity matrix
similarity_matrix = cosine_similarity(normalized_encoding)
# the higher the similarity, the greater the chance that a label is
# sampled as negative example
negative_label_probabilities = {}
for row_index, label in enumerate(all_labels):
negative_label_probabilities[label] = {}
for column_index, other_label in enumerate(all_labels):
if label != other_label:
negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index]
self.label_nearest_map = negative_label_probabilities
def get_current_label_dictionary(self):
label_dictionary = self._task_specific_attributes[self._current_task]["label_dictionary"]
return label_dictionary
def get_current_label_type(self):
return self._task_specific_attributes[self._current_task]["label_type"]
def is_current_task_multi_label(self):
return self._task_specific_attributes[self._current_task]["multi_label"]
def add_and_switch_to_new_task(
self,
task_name,
label_dictionary: Union[List, Set, Dictionary, str],
label_type: str,
multi_label: bool = True,
force_switch: bool = False,
):
"""
Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches'
to the new task. Parameters are similar to the constructor except for model choice, batch
size and negative sampling. This method does not store the resultant model onto disk.
:param task_name: a string depicting the name of the task
:param label_dictionary: dictionary of the labels you want to predict
:param label_type: string to identify the label type ('ner', 'sentiment', etc.)
:param multi_label: whether this task is a multi-label prediction problem
:param force_switch: if True, will overwrite existing task with same name
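        Example (illustrative; the task and label names are hypothetical)::
            tars.add_and_switch_to_new_task("question_type",
                                            ["question about person", "question about location"],
                                            label_type="question_type")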
"""
if task_name in self._task_specific_attributes and not force_switch:
log.warning("Task `%s` already exists in TARS model. Switching to it.", task_name)
else:
# make label dictionary if no Dictionary object is passed
if isinstance(label_dictionary, Dictionary):
label_dictionary = label_dictionary.get_items()
if type(label_dictionary) == str:
label_dictionary = [label_dictionary]
# prepare dictionary of tags (without B- I- prefixes and without UNK)
tag_dictionary = Dictionary(add_unk=False)
for tag in label_dictionary:
if tag == "<unk>" or tag == "O":
continue
if tag[1] == "-":
tag = tag[2:]
tag_dictionary.add_item(tag)
else:
tag_dictionary.add_item(tag)
self._task_specific_attributes[task_name] = {
"label_dictionary": tag_dictionary,
"label_type": label_type,
"multi_label": multi_label,
}
self.switch_to_task(task_name)
def list_existing_tasks(self) -> Set[str]:
"""
Lists existing tasks in the loaded TARS model on the console.
"""
return set(self._task_specific_attributes.keys())
def switch_to_task(self, task_name):
"""
Switches to a task which was previously added.
"""
if task_name not in self._task_specific_attributes:
log.error(
"Provided `%s` does not exist in the model. Consider calling " "`add_and_switch_to_new_task` first.",
task_name,
)
else:
self._current_task = task_name
def _drop_task(self, task_name):
if task_name in self._task_specific_attributes:
if self._current_task == task_name:
log.error(
"`%s` is the current task." " Switch to some other task before dropping this.",
task_name,
)
else:
self._task_specific_attributes.pop(task_name)
else:
log.warning("No task exists with the name `%s`.", task_name)
@staticmethod
def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
if len(sentences) != len(filtered_sentences):
log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.")
return filtered_sentences
@property
def label_type(self):
return self.get_current_label_type()
def predict_zero_shot(
self,
sentences: Union[List[Sentence], Sentence],
candidate_label_set: Union[List[str], Set[str], str],
multi_label: bool = True,
):
"""
Method to make zero shot predictions from the TARS model
:param sentences: input sentence objects to classify
:param candidate_label_set: set of candidate labels
:param multi_label: indicates whether multi-label or single class prediction. Defaults to True.
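        Example (illustrative; the sentence and candidate labels are hypothetical)::
            sentence = Sentence("I am so glad you liked it!")
            tars.predict_zero_shot(sentence, ["happy", "sad"])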
"""
# check if candidate_label_set is empty
if candidate_label_set is None or len(candidate_label_set) == 0:
log.warning("Provided candidate_label_set is empty")
return
# make list if only one candidate label is passed
if isinstance(candidate_label_set, str):
candidate_label_set = {candidate_label_set}
# create label dictionary
label_dictionary = Dictionary(add_unk=False)
for label in candidate_label_set:
label_dictionary.add_item(label)
# note current task
existing_current_task = self._current_task
# create a temporary task
self.add_and_switch_to_new_task(
task_name="ZeroShot",
label_dictionary=label_dictionary,
label_type="-".join(label_dictionary.get_items()),
multi_label=multi_label,
)
try:
# make zero shot predictions
self.predict(sentences)
finally:
# switch to the pre-existing task
self.switch_to_task(existing_current_task)
self._drop_task("ZeroShot")
return
class TARSTagger(FewshotClassifier):
"""
TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class
sequence labeler which given a <label, text> pair predicts the probability for each word
to belong to one of the BIOES classes. The input data is a usual Sentence object which is inflated
by the model internally before pushing it through the transformer stack of BERT.
"""
static_label_type = "tars_label"
def __init__(
self,
task_name: Optional[str] = None,
label_dictionary: Optional[Dictionary] = None,
label_type: Optional[str] = None,
embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased",
num_negative_labels_to_sample: int = 2,
prefix: bool = True,
**tagger_args,
):
"""
Initializes a TextClassifier
:param task_name: a string depicting the name of the task
:param label_dictionary: dictionary of labels you want to predict
:param embeddings: name of the pre-trained transformer model e.g.,
'bert-base-uncased' etc
:param num_negative_labels_to_sample: number of negative labels to sample for each
positive labels against a sentence during training. Defaults to 2 negative
labels for each positive label. The model would sample all the negative labels
if None is passed. That slows down the training considerably.
"""
super(TARSTagger, self).__init__()
if isinstance(embeddings, str):
embeddings = TransformerWordEmbeddings(
model=embeddings,
fine_tune=True,
layers="-1",
layer_mean=False,
)
# prepare TARS dictionary
tars_dictionary = Dictionary(add_unk=False)
tars_dictionary.add_item("entity")
tars_dictionary.span_labels = True
# initialize a bare-bones sequence tagger
self.tars_model: SequenceTagger = SequenceTagger(
hidden_size=123,
embeddings=embeddings,
tag_dictionary=tars_dictionary,
tag_type=self.static_label_type,
use_crf=False,
use_rnn=False,
reproject_embeddings=False,
**tagger_args,
)
# transformer separator
self.separator = str(self.tars_embeddings.tokenizer.sep_token)
if self.tars_embeddings.tokenizer._bos_token:
self.separator += str(self.tars_embeddings.tokenizer.bos_token)
self.prefix = prefix
self.num_negative_labels_to_sample = num_negative_labels_to_sample
if task_name and label_dictionary and label_type:
# Store task specific labels since TARS can handle multiple tasks
self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
else:
log.info(
"TARS initialized without a task. You need to call .add_and_switch_to_new_task() "
"before training this model"
)
def _get_tars_formatted_sentence(self, label, sentence):
original_text = sentence.to_tokenized_string()
label_text_pair = (
f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}"
)
label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" "))
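        # e.g. with prefix=True the pair reads "<label> <sep> <original sentence>";
        # with prefix=False the label is appended after the separator instead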
# make a tars sentence where all labels are O by default
tars_sentence = Sentence(label_text_pair, use_tokenizer=False)
for entity_label in sentence.get_labels(self.label_type):
if entity_label.value == label:
new_span = [tars_sentence.get_token(token.idx + label_length) for token in entity_label.span]
tars_sentence.add_complex_label(self.static_label_type, SpanLabel(Span(new_span), value="entity"))
return tars_sentence
def _get_state_dict(self):
model_state = {
"state_dict": self.state_dict(),
"current_task": self._current_task,
"tag_type": self.get_current_label_type(),
"tag_dictionary": self.get_current_label_dictionary(),
"tars_model": self.tars_model,
"num_negative_labels_to_sample": self.num_negative_labels_to_sample,
"prefix": self.prefix,
"task_specific_attributes": self._task_specific_attributes,
}
return model_state
@staticmethod
def _fetch_model(model_name) -> str:
if model_name == "tars-ner":
cache_dir = Path("models")
model_name = cached_path(
"https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt",
cache_dir=cache_dir,
)
return model_name
@staticmethod
def _init_model_with_state_dict(state):
# init new TARS classifier
model = TARSTagger(
task_name=state["current_task"],
label_dictionary=state["tag_dictionary"],
label_type=state["tag_type"],
embeddings=state["tars_model"].embeddings,
num_negative_labels_to_sample=state["num_negative_labels_to_sample"],
prefix=state["prefix"],
)
# set all task information
model._task_specific_attributes = state["task_specific_attributes"]
# linear layers of internal classifier
model.load_state_dict(state["state_dict"])
return model
@property
def tars_embeddings(self):
return self.tars_model.embeddings
def predict(
self,
sentences: Union[List[Sentence], Sentence],
mini_batch_size=32,
return_probabilities_for_all_classes: bool = False,
verbose: bool = False,
label_name: Optional[str] = None,
return_loss=False,
embedding_storage_mode="none",
most_probable_first: bool = True,
):
"""
        Predict sequence tags for the Named Entity Recognition task
        :param sentences: a Sentence or a List of Sentence
        :param mini_batch_size: size of the minibatch; bigger is usually faster but consumes more memory,
        up to a point where it has no further effect.
        :param return_probabilities_for_all_classes: if True, compute the score for each tag on each token,
        otherwise only the score of the best tag is returned
        :param verbose: set to True to display a progress bar
        :param return_loss: set to True to return loss
        :param label_name: set this to change the name of the label type that is predicted
        :param embedding_storage_mode: default is 'none', which is always best. Only set to 'cpu' or 'gpu' if
        you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
        :param most_probable_first: if True, assign span labels in order of decreasing confidence,
        skipping tokens that have already been labelled
"""
if label_name is None:
label_name = self.get_current_label_type()
if not sentences:
return sentences
if not isinstance(sentences, list):
sentences = [sentences]
reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)
dataloader = DataLoader(
dataset=FlairDatapointDataset(reordered_sentences),
batch_size=mini_batch_size,
)
# progress bar for verbosity
if verbose:
dataloader = tqdm(dataloader)
overall_loss = 0
overall_count = 0
with torch.no_grad():
for batch in dataloader:
batch = self._filter_empty_sentences(batch)
# stop if all sentences are empty
if not batch:
continue
# go through each sentence in the batch
for sentence in batch:
# always remove tags first
sentence.remove_labels(label_name)
all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
all_detected = {}
for label in all_labels:
tars_sentence = self._get_tars_formatted_sentence(label, sentence)
loss_and_count = self.tars_model.predict(
tars_sentence,
label_name=label_name,
return_loss=True,
)
overall_loss += loss_and_count[0].item()
overall_count += loss_and_count[1]
for predicted in tars_sentence.get_labels(label_name):
predicted.value = label
all_detected[predicted] = predicted.score
if most_probable_first:
import operator
already_set_indices: List[int] = []
sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1))
sorted_x.reverse()
                    # iterate over candidate spans from highest to lowest confidence
                    for label, _score in sorted_x:
label_length = (
0 if not self.prefix else len(label.value.split(" ")) + len(self.separator.split(" "))
)
# determine whether tokens in this span already have a label
tag_this = True
for token in label.span:
corresponding_token = sentence.get_token(token.idx - label_length)
if corresponding_token is None:
tag_this = False
continue
if token.idx in already_set_indices:
tag_this = False
continue
# only add if all tokens have no label
if tag_this:
already_set_indices.extend(token.idx for token in label.span)
predicted_span = [sentence.get_token(token.idx - label_length) for token in label.span]
sentence.add_complex_label(
label_name,
label=SpanLabel(Span(predicted_span), value=label.value, score=label.score),
)
# clearing token embeddings to save memory
store_embeddings(batch, storage_mode=embedding_storage_mode)
if return_loss:
return overall_loss, overall_count
class TARSClassifier(FewshotClassifier):
"""
TARS model for text classification. In the backend, the model uses a BERT based binary
text classifier which given a <label, text> pair predicts the probability of two classes
"True", and "False". The input data is a usual Sentence object which is inflated
by the model internally before pushing it through the transformer stack of BERT.
"""
static_label_type = "tars_label"
LABEL_MATCH = "YES"
LABEL_NO_MATCH = "NO"
def __init__(
self,
task_name: Optional[str] = None,
label_dictionary: Optional[Dictionary] = None,
label_type: Optional[str] = None,
embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased",
num_negative_labels_to_sample: int = 2,
prefix: bool = True,
**tagger_args,
):
"""
Initializes a TextClassifier
:param task_name: a string depicting the name of the task
:param label_dictionary: dictionary of labels you want to predict
:param embeddings: name of the pre-trained transformer model e.g.,
'bert-base-uncased' etc
:param num_negative_labels_to_sample: number of negative labels to sample for each
positive labels against a sentence during training. Defaults to 2 negative
labels for each positive label. The model would sample all the negative labels
if None is passed. That slows down the training considerably.
:param multi_label: auto-detected by default, but you can set this to True
            to force multi-label prediction or False to force single-label prediction
:param multi_label_threshold: If multi-label you can set the threshold to make predictions
:param beta: Parameter for F-beta score for evaluation and training annealing
"""
super(TARSClassifier, self).__init__()
if isinstance(embeddings, str):
embeddings = TransformerDocumentEmbeddings(
model=embeddings,
fine_tune=True,
layers="-1",
layer_mean=False,
)
# prepare TARS dictionary
tars_dictionary = Dictionary(add_unk=False)
tars_dictionary.add_item(self.LABEL_NO_MATCH)
tars_dictionary.add_item(self.LABEL_MATCH)
# initialize a bare-bones sequence tagger
self.tars_model = TextClassifier(
document_embeddings=embeddings,
label_dictionary=tars_dictionary,
label_type=self.static_label_type,
**tagger_args,
)
# transformer separator
self.separator = str(self.tars_embeddings.tokenizer.sep_token)
if self.tars_embeddings.tokenizer._bos_token:
self.separator += str(self.tars_embeddings.tokenizer.bos_token)
self.prefix = prefix
self.num_negative_labels_to_sample = num_negative_labels_to_sample
if task_name and label_dictionary and label_type:
# Store task specific labels since TARS can handle multiple tasks
self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
else:
log.info(
"TARS initialized without a task. You need to call .add_and_switch_to_new_task() "
"before training this model"
)
self.clean_up_labels = True
def _clean(self, label_value: str) -> str:
if self.clean_up_labels:
return label_value.replace("_", " ")
else:
return label_value
def _get_tars_formatted_sentence(self, label, sentence):
label = self._clean(label)
original_text = sentence.to_tokenized_string()
label_text_pair = (
f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}"
)
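        # e.g. with prefix=True and a BERT "[SEP]" separator (illustrative values):
        #   label "sports" + sentence "The match was thrilling"
        #   -> "sports [SEP] The match was thrilling"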
sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())]
tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH
tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label)
return tars_sentence
def _get_state_dict(self):
model_state = {
"state_dict": self.state_dict(),
"current_task": self._current_task,
"label_type": self.get_current_label_type(),
"label_dictionary": self.get_current_label_dictionary(),
"tars_model": self.tars_model,
"num_negative_labels_to_sample": self.num_negative_labels_to_sample,
"task_specific_attributes": self._task_specific_attributes,
}
return model_state
@staticmethod
def _init_model_with_state_dict(state):
# init new TARS classifier
label_dictionary = state["label_dictionary"]
label_type = "default_label" if not state["label_type"] else state["label_type"]
model: TARSClassifier = TARSClassifier(
task_name=state["current_task"],
label_dictionary=label_dictionary,
label_type=label_type,
embeddings=state["tars_model"].document_embeddings,
num_negative_labels_to_sample=state["num_negative_labels_to_sample"],
)
# set all task information
model._task_specific_attributes = state["task_specific_attributes"]
# linear layers of internal classifier
model.load_state_dict(state["state_dict"])
return model
@staticmethod
def _fetch_model(model_name) -> str:
model_map = {}
hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
model_map["tars-base"] = "/".join([hu_path, "tars-base", "tars-base-v8.pt"])
cache_dir = Path("models")
if model_name in model_map:
model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
return model_name
@property
def tars_embeddings(self):
return self.tars_model.document_embeddings
def predict(
self,
sentences: Union[List[Sentence], Sentence],
mini_batch_size=32,
return_probabilities_for_all_classes: bool = False,
verbose: bool = False,
label_name: Optional[str] = None,
return_loss=False,
embedding_storage_mode="none",
label_threshold: float = 0.5,
multi_label: Optional[bool] = None,
):
"""
Predict sequence tags for Named Entity Recognition task
:param sentences: a Sentence or a List of Sentence
:param mini_batch_size: size of the minibatch, usually bigger is more rapid but consume more memory,
up to a point when it has no more effect.
:param all_tag_prob: True to compute the score for each tag on each token,
otherwise only the score of the best tag is returned
:param verbose: set to True to display a progress bar
:param return_loss: set to True to return loss
:param label_name: set this to change the name of the label type that is predicted
:param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
'gpu' to store embeddings in GPU memory.
"""
if label_name is None:
label_name = self.get_current_label_type()
if multi_label is None:
multi_label = self.is_current_task_multi_label()
if not sentences:
return sentences
if isinstance(sentences, Sentence):
sentences = [sentences]
# set context if not set already
previous_sentence = None
for sentence in sentences:
if sentence.is_context_set():
continue
sentence._previous_sentence = previous_sentence
sentence._next_sentence = None
if previous_sentence:
previous_sentence._next_sentence = sentence
previous_sentence = sentence
reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)
dataloader = DataLoader(
dataset=FlairDatapointDataset(reordered_sentences),
batch_size=mini_batch_size,
)
# progress bar for verbosity
if verbose:
progressbar = tqdm(dataloader)
progressbar.set_description("Batch inference")
dataloader = progressbar
overall_loss = 0
overall_count = 0
batch_no = 0
with torch.no_grad():
for batch in dataloader:
batch_no += 1
batch = self._filter_empty_sentences(batch)
# stop if all sentences are empty
if not batch:
continue
# go through each sentence in the batch
for sentence in batch:
# always remove tags first
sentence.remove_labels(label_name)
all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
best_label = None
for label in all_labels:
tars_sentence = self._get_tars_formatted_sentence(label, sentence)
loss_and_count = self.tars_model.predict(
tars_sentence,
label_name=label_name,
return_loss=True,
                            return_probabilities_for_all_classes=label_threshold < 0.5,
)
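                        # probabilities for all classes are only needed when the threshold is below
                        # 0.5: with the binary TARS model the top class always scores >= 0.5, so a
                        # lower threshold is the only case where the non-argmax class can still pass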
overall_loss += loss_and_count[0].item()
overall_count += loss_and_count[1]
# add all labels that according to TARS match the text and are above threshold
for predicted_tars_label in tars_sentence.get_labels(label_name):
if (
predicted_tars_label.value == self.LABEL_MATCH
and predicted_tars_label.score > label_threshold
):
# do not add labels below confidence threshold
sentence.add_label(label_name, label, predicted_tars_label.score)
# only use label with highest confidence if enforcing single-label predictions
if not multi_label:
if len(sentence.get_labels(label_name)) > 0:
# get all label scores and do an argmax to get the best label
label_scores = torch.tensor(
[label.score for label in sentence.get_labels(label_name)],
dtype=torch.float,
)
best_label = sentence.get_labels(label_name)[torch.argmax(label_scores)]
# remove previously added labels and only add the best label
sentence.remove_labels(label_name)
sentence.add_label(
typename=label_name,
value=best_label.value,
score=best_label.score,
)
# clearing token embeddings to save memory
store_embeddings(batch, storage_mode=embedding_storage_mode)
if return_loss:
return overall_loss, overall_count
| [
"torch.no_grad",
"torch.argmax"
] | 1.5.0 | marleneDebatin/flair | 4d17509f358158f66d43e85db1b6990523b0b095 |
1.8 |
import random

import numpy as np
import torch
class InfiniteDataLoader(torch.utils.data.DataLoader):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dataset_iterator = super().__iter__()
def __iter__(self):
return self
def __next__(self):
try:
batch = next(self.dataset_iterator)
except StopIteration:
self.dataset_iterator = super().__iter__()
batch = next(self.dataset_iterator)
return batch
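# Illustrative usage sketch (hypothetical dataset variable):
#   loader = InfiniteDataLoader(train_set, batch_size=32, shuffle=True)
#   batch = next(iter(loader))  # wraps around instead of raising StopIteration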
def make_deterministic(seed=0):
"""Make results deterministic. If seed == -1, do not make deterministic.
Running your script in a deterministic way might slow it down.
Note that for some packages (eg: sklearn's PCA) this function is not enough.
"""
seed = int(seed)
if seed == -1:
return
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
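# e.g. call make_deterministic(42) at the start of a training script; pass seed=-1
# to keep the faster non-deterministic behavior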
def setup_logging(output_folder, exist_ok=False, console="debug",
info_filename="info.log", debug_filename="debug.log"):
"""Set up logging files and console output.
Creates one file for INFO logs and one for DEBUG logs.
Args:
output_folder (str): creates the folder where to save the files.
exist_ok (boolean): if False throw a FileExistsError if output_folder already exists
debug (str):
if == "debug" prints on console debug messages and higher
if == "info" prints on console info messages and higher
if == None does not use console (useful when a logger has already been set)
info_filename (str): the name of the info file. if None, don't create info file
debug_filename (str): the name of the debug file. if None, don't create debug file
"""
import os
import sys
import logging
import traceback
if not exist_ok and os.path.exists(output_folder):
raise FileExistsError(f"{output_folder} already exists!")
os.makedirs(output_folder, exist_ok=True)
base_formatter = logging.Formatter('%(asctime)s %(message)s', "%Y-%m-%d %H:%M:%S")
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
    if info_filename is not None:
info_file_handler = logging.FileHandler(f'{output_folder}/{info_filename}')
info_file_handler.setLevel(logging.INFO)
info_file_handler.setFormatter(base_formatter)
logger.addHandler(info_file_handler)
    if debug_filename is not None:
debug_file_handler = logging.FileHandler(f'{output_folder}/{debug_filename}')
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(base_formatter)
logger.addHandler(debug_file_handler)
    if console is not None:
        console_handler = logging.StreamHandler()
        if console == "debug":
            console_handler.setLevel(logging.DEBUG)
        if console == "info":
            console_handler.setLevel(logging.INFO)
console_handler.setFormatter(base_formatter)
logger.addHandler(console_handler)
    def my_handler(type_, value, tb):
        # fixed: pass the handler's type_ argument instead of the builtin type
        logger.info("\n" + "".join(traceback.format_exception(type_, value, tb)))
        logging.info("Experiment finished (with some errors)")
sys.excepthook = my_handler
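# Illustrative usage sketch (hypothetical folder name):
#   setup_logging("logs/experiment_1")
#   logging.info("printed on console and written to info.log and debug.log")
#   logging.debug("written to debug.log; printed on console since console defaults to 'debug'")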
| [
"torch.manual_seed",
"torch.cuda.manual_seed_all"
] | 1.8.2 | gmberton/CosPlace | 0f03cc9fe25919c87627e92535f3693747617eae |