import torch
from torch import Tensor
from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _default_to_fused_or_foreach,
                        _get_scalar_dtype, _differentiable_doc, _maximize_doc, _foreach_doc, _view_as_real,
                        _capturable_doc)
from typing import List, Optional

__all__ = ["Adamax", "adamax"]
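
# This module implements the Adamax optimizer (a variant of Adam based on the
# infinity norm). It exposes the Adamax class and a functional `adamax` API with
# single-tensor and multi-tensor (foreach) code paths.
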
class Adamax(Optimizer):
    def __init__(
        self,
        params,
        lr=2e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        foreach: Optional[bool] = None,
        *,
        maximize: bool = False,
        differentiable: bool = False,
        capturable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
            capturable=capturable,
        )
        super().__init__(params, defaults)
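
    # __setstate__ restores state loaded from older checkpoints: it backfills newer
    # hyperparameter keys with their defaults and promotes plain-float "step" counters
    # to tensors (placed on the param's device when capturable=True).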
    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
                                       if group["capturable"]
                                       else torch.tensor(step_val, dtype=_get_scalar_dtype()))
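
    # _init_group collects the parameters of a group that have gradients and lazily
    # creates their per-parameter state (step counter, exp_avg, exp_inf). It returns
    # whether any parameter is complex so the update can view such tensors as real.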
    def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_infs, state_steps):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("Adamax does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = (torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                                 if group["capturable"] else torch.tensor(0.0, dtype=_get_scalar_dtype()))
                state["exp_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                state["exp_inf"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )

            exp_avgs.append(state["exp_avg"])
            exp_infs.append(state["exp_inf"])
            state_steps.append(state["step"])

        return has_complex
    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_infs = []
            state_steps = []

            beta1, beta2 = group["betas"]
            eps = group["eps"]
            lr = group["lr"]
            weight_decay = group["weight_decay"]
            foreach = group["foreach"]
            maximize = group["maximize"]
            differentiable = group["differentiable"]
            capturable = group["capturable"]

            has_complex = self._init_group(
                group, params_with_grad, grads, exp_avgs, exp_infs, state_steps
            )

            adamax(
                params_with_grad,
                grads,
                exp_avgs,
                exp_infs,
                state_steps,
                eps=eps,
                beta1=beta1,
                beta2=beta2,
                lr=lr,
                weight_decay=weight_decay,
                foreach=foreach,
                maximize=maximize,
                differentiable=differentiable,
                capturable=capturable,
                has_complex=has_complex,
            )

        return loss
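
# A minimal usage sketch (illustrative only; the model and data below are hypothetical,
# not part of this module):
#
#   model = torch.nn.Linear(10, 2)
#   optimizer = torch.optim.Adamax(model.parameters(), lr=2e-3)
#   loss = model(torch.randn(4, 10)).sum()
#   loss.backward()
#   optimizer.step()
#   optimizer.zero_grad()
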
Adamax.__doc__ = r"""Implements Adamax algorithm (a variant of Adam based on infinity norm).

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)},
                \: \lambda \text{ (weight decay)},                                               \\
            &\hspace{13mm}    \epsilon \text{ (epsilon)}                                         \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                u_0 \leftarrow 0 \text{ ( infinity norm)}                                \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t      \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t               \\
            &\hspace{5mm}u_t      \leftarrow   \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon)   \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\
            &\rule{110mm}{0.4pt}                                                         \\[-1.ex]
            &\bf{return} \:  \theta_t                                                    \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                         \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of the gradient and its infinity norm (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
        {_capturable_doc}

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980

    """
def adamax(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_infs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    maximize: bool = False,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    *,
    eps: float,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
):
    r"""Functional API that performs adamax algorithm computation.

    See :class:`~torch.optim.Adamax` for details.
    """

    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )
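
    # If the caller did not pick an implementation, choose between the foreach
    # (multi-tensor) and single-tensor paths based on the input tensors; the fused
    # path is not requested here.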
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adamax
    else:
        func = _single_tensor_adamax

    func(
        params,
        grads,
        exp_avgs,
        exp_infs,
        state_steps,
        eps=eps,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        maximize=maximize,
        differentiable=differentiable,
        has_complex=has_complex,
        capturable=capturable,
    )
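
# Illustrative sketch of calling the functional API directly (hypothetical tensors;
# Adamax.step() normally builds these lists via _init_group and runs under no_grad):
#
#   p = torch.zeros(3, requires_grad=True)
#   p.grad = torch.ones(3)
#   exp_avg, exp_inf, step_t = torch.zeros(3), torch.zeros(3), torch.tensor(0.0)
#   with torch.no_grad():
#       adamax([p], [p.grad], [exp_avg], [exp_inf], [step_t],
#              eps=1e-8, beta1=0.9, beta2=0.999, lr=2e-3, weight_decay=0.0)
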
def _single_tensor_adamax(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_infs: List[Tensor],
    state_steps: List[Tensor],
    *,
    eps: float,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        exp_avg = exp_avgs[i]
        exp_inf = exp_infs[i]
        step_t = state_steps[i]

        # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
        if not torch._utils.is_compiling() and capturable:
            assert (param.is_cuda and step_t.is_cuda) or (
                param.is_xla and step_t.is_xla
            ), "If capturable=True, params and state_steps must be CUDA or XLA tensors."

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_inf = torch.view_as_real(exp_inf)

        # Update biased first moment estimate.
        exp_avg.lerp_(grad, 1 - beta1)
        # Update the exponentially weighted infinity norm.
        if not differentiable:
            torch.maximum(
                exp_inf.mul_(beta2),
                grad.abs().add_(eps),
                out=exp_inf,
            )
        else:
            norm_buf = torch.cat(
                [exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0
            )
            exp_inf.copy_(torch.amax(norm_buf, 0, keepdim=False))
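
        # Note: in the capturable branch below, denom = exp_inf * (beta1**t - 1) / lr,
        # so addcdiv_ adds exp_avg / denom = -lr * exp_avg / ((1 - beta1**t) * exp_inf),
        # matching the non-capturable update param -= (lr / (1 - beta1**t)) * exp_avg / exp_inf.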
        if capturable:
            # why jump through extra hoops and negate bias_correction? check out #121238
            # once fixed, we should use bias_correction with addcdiv value=-1 for readability
            neg_bias_correction = beta1 ** step_t - 1
            neg_bias_correction.div_(lr)
            denom = exp_inf * neg_bias_correction
            param.addcdiv_(exp_avg, denom)
        else:
            bias_correction = 1 - beta1 ** _get_value(step_t)
            clr = lr / bias_correction
            param.addcdiv_(exp_avg, exp_inf, value=-clr)

def _multi_tensor_adamax(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_infs: List[Tensor],
    state_steps: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    assert not differentiable, "_foreach ops don't support autograd"

    if len(params) == 0:
        return

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if (not torch._utils.is_compiling() and capturable
            and not all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps))):
        raise RuntimeError("If capturable=True, params and state_steps must be CUDA tensors.")
    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_infs, state_steps])
    for ((grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs, grouped_state_steps), _) in grouped_tensors.values():
        if has_complex:
            _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs)

        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if grouped_state_steps[0].is_cpu:
            torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if weight_decay != 0:
            if maximize:
                # Re-use the intermediate memory (grouped_grads) already allocated for maximize
                torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
            else:
                grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)

        # Update biased first moment estimate.
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        # Update the exponentially weighted infinity norm.
        torch._foreach_mul_(grouped_exp_infs, beta2)

        # in this case, we need to introduce a copy of the grads
        # since one has not been introduced previously
        if not maximize and weight_decay == 0:
            grouped_grads = torch._foreach_abs(grouped_grads)
        else:
            torch._foreach_abs_(grouped_grads)

        torch._foreach_add_(grouped_grads, eps)
        torch._foreach_maximum_(grouped_exp_infs, grouped_grads)
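
        # As in the single-tensor path, the capturable branch folds lr and the negated
        # bias correction into the denominator, avoiding the tensor-to-scalar sync that
        # _get_value(step) would require.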
        if capturable:
            bias_corrections = torch._foreach_pow(beta1, grouped_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_corrections, 1)
            torch._foreach_div_(bias_corrections, lr)
            denom = torch._foreach_mul(grouped_exp_infs, bias_corrections)
            torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, denom)
        else:
            bias_corrections = [1 - beta1 ** _get_value(step) for step in grouped_state_steps]
            step_size = [(lr / bc) * -1 for bc in bias_corrections]
            torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, grouped_exp_infs, step_size)