library (stringclasses, 1 value) | test_file (stringclasses, 785 values) | test_function (stringlengths, 1–295) | before (stringlengths, 0–448k) | after (stringlengths, 0–487k) | context_before (stringclasses, 947 values) | context_after (stringlengths, 0–16.3k) | commit_before (stringclasses, 1 value) | commit_after (stringclasses, 1 value) | change_type (stringclasses, 3 values) |
---|---|---|---|---|---|---|---|---|---|
torch
|
test/test_optim.py
|
test_cycle_lr_with_momentumless_optimizer
|
def test_cycle_lr_with_momentumless_optimizer(self):
    # Note [Temporarily set optimizer to Adam]
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # The TestLRScheduler object carries around an SGD optimizer to avoid having to
    # instantiate one for every test. This gets in the way for our very specific case
    # in which we need to use Adam (or really any optimizer that doesn't use momentum)
    # in order to test that the momentum bug in CyclicLR is fixed (the bug is described
    # in more detail in https://github.com/pytorch/pytorch/issues/19003 ).
    old_opt = self.opt
    self.opt = optim.Adam(
        [
            {"params": self.net.conv1.parameters()},
            {"params": self.net.conv2.parameters(), "lr": 0.5},
        ],
        lr=0.05,
    )
    lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
    lr_targets = [lr_target, lr_target]
    momentum_target = [None] * len(lr_target)
    momentum_targets = [momentum_target, momentum_target]
    scheduler = CyclicLR(
        self.opt,
        base_lr=1,
        max_lr=5,
        step_size_up=4,
        cycle_momentum=False,
        mode="triangular",
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
    self.opt = old_opt  # set optimizer back to SGD
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
closure
|
def closure():
    return torch.tensor([10])
res1 = opt1.step(closure)
res2 = opt2.step(closure)
self.assertEqual(type(res1), type(res2))
|
def closure():
    optimizer.zero_grad()
    loss = (weight.mv(input) + bias).pow(2).sum()
    loss.backward()
    if optim_info.only_supports_sparse_grads:
        # For this test, we naively convert the Tensor layout, which we know does
        # NOT represent the expected use case for optims like SparseAdam!
        weight.grad = weight.grad.to_sparse()
        bias.grad = bias.grad.to_sparse()
    return loss
initial_value = closure().item()
for _ in range(20):
    if optim_info.step_requires_closure:
        loss = optimizer.step(closure)
    else:
        loss = closure()
        optimizer.step()
    for scheduler in schedulers:
        if isinstance(scheduler, ReduceLROnPlateau):
            scheduler.step(loss)
        else:
            scheduler.step()
if optim_input.kwargs.get("maximize", False):
    self.assertGreater(closure().item(), initial_value)
else:
    self.assertLess(closure().item(), initial_value)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_optim.py
|
test_cycle_lr_cycle_momentum_fail_with_momentumless_optimizer
|
def test_cycle_lr_cycle_momentum_fail_with_momentumless_optimizer(self):
    with self.assertRaises(ValueError):
        adam_opt = optim.Adam(self.net.parameters())
        scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=True)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_cycle_lr_removed_after_out_of_scope
|
def test_cycle_lr_removed_after_out_of_scope(self):
    import gc
    import weakref
    gc.disable()
    def test():
        adam_opt = optim.Adam(self.net.parameters())
        scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
        return weakref.ref(scheduler)
    ref = test()
    assert ref() is None
    gc.enable()
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test
|
def test():
    adam_opt = optim.Adam(self.net.parameters())
    scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
    return weakref.ref(scheduler)
ref = test()
assert ref() is None
gc.enable()
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_cycle_lr_state_dict_picklable
|
def test_cycle_lr_state_dict_picklable(self):
    adam_opt = optim.Adam(self.net.parameters())
    scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
    self.assertIsInstance(scheduler._scale_fn_ref, weakref.WeakMethod)
    state = scheduler.state_dict()
    self.assertNotIn("_scale_fn_ref", state)
    pickle.dumps(state)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_cycle_lr_scale_fn_restored_from_state_dict
|
def test_cycle_lr_scale_fn_restored_from_state_dict(self):
    adam_opt = optim.Adam(self.net.parameters())
    # Case 1: Built-in mode
    scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, mode="triangular2")
    restored_scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False)
    restored_scheduler.load_state_dict(scheduler.state_dict())
    self.assertTrue(restored_scheduler.mode == scheduler.mode == "triangular2")
    self.assertIsNotNone(restored_scheduler._scale_fn_ref) and self.assertIsNotNone(scheduler._scale_fn_ref)
    self.assertIs(restored_scheduler._scale_fn_custom, None)
    self.assertIs(scheduler._scale_fn_custom, None)
    # Case 2: Custom `scale_fn`
    def scale_fn(_):
        return 0.5
    scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
    restored_scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
    restored_scheduler.load_state_dict(scheduler.state_dict())
    self.assertIs(scheduler._scale_fn_custom, scale_fn)
    self.assertIs(restored_scheduler._scale_fn_custom, scale_fn)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
scale_fn
|
def scale_fn(_):
    return 0.5
scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
restored_scheduler = CyclicLR(adam_opt, base_lr=1, max_lr=5, cycle_momentum=False, scale_fn=scale_fn)
restored_scheduler.load_state_dict(scheduler.state_dict())
self.assertIs(scheduler._scale_fn_custom, scale_fn)
self.assertIs(restored_scheduler._scale_fn_custom, scale_fn)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_onecycle_lr_invalid_pct_start
|
def test_onecycle_lr_invalid_pct_start(self):
    with self.assertRaises(ValueError):
        scheduler = OneCycleLR(self.opt, max_lr=1e-3, total_steps=10, pct_start=1.1)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_onecycle_lr_cannot_calculate_total_steps
|
def test_onecycle_lr_cannot_calculate_total_steps(self):
    with self.assertRaises(ValueError):
        scheduler = OneCycleLR(self.opt, max_lr=1e-3)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_onecycle_lr_linear_annealing
|
def test_onecycle_lr_linear_annealing(self):
    lr_target = [1, 13, 25, 21.5, 18, 14.5, 11, 7.5, 4, 0.5]
    momentum_target = [22, 11.5, 1, 4, 7, 10, 13, 16, 19, 22]
    lr_targets = [lr_target, lr_target]
    momentum_targets = [momentum_target, momentum_target]
    scheduler = OneCycleLR(
        self.opt,
        max_lr=25,
        final_div_factor=2,
        base_momentum=1,
        max_momentum=22,
        total_steps=10,
        anneal_strategy="linear",
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_onecycle_lr_linear_annealing_three_phases
|
def test_onecycle_lr_linear_annealing_three_phases(self):
    lr_target = [1, 9, 17, 25, 17, 9, 1, 0.75, 0.5, 0.25]
    momentum_target = [22, 15, 8, 1, 8, 15, 22, 22, 22, 22]
    lr_targets = [lr_target, lr_target]
    momentum_targets = [momentum_target, momentum_target]
    scheduler = OneCycleLR(
        self.opt,
        max_lr=25,
        div_factor=25,
        base_momentum=1,
        max_momentum=22,
        total_steps=10,
        anneal_strategy="linear",
        pct_start=0.4,
        final_div_factor=4,
        three_phase=True,
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
annealing_cos
|
def annealing_cos(start, end, pct):
    cos_out = math.cos(math.pi * pct) + 1
    return end + (start - end) / 2.0 * cos_out
lr_target = [
    1,
    13,
    25,
    annealing_cos(25, 0.5, 1 / 7.0),
    annealing_cos(25, 0.5, 2 / 7.0),
    annealing_cos(25, 0.5, 3 / 7.0),
    annealing_cos(25, 0.5, 4 / 7.0),
    annealing_cos(25, 0.5, 5 / 7.0),
    annealing_cos(25, 0.5, 6 / 7.0),
    0.5,
]
momentum_target = [
    22,
    11.5,
    1,
    annealing_cos(1, 22, 1 / 7.0),
    annealing_cos(1, 22, 2 / 7.0),
    annealing_cos(1, 22, 3 / 7.0),
    annealing_cos(1, 22, 4 / 7.0),
    annealing_cos(1, 22, 5 / 7.0),
    annealing_cos(1, 22, 6 / 7.0),
    22,
]
lr_targets = [lr_target, lr_target]
momentum_targets = [momentum_target, momentum_target]
scheduler = OneCycleLR(
    self.opt,
    max_lr=25,
    final_div_factor=2,
    base_momentum=1,
    max_momentum=22,
    total_steps=10,
)
self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_cycle_lr_with_adam
|
def test_cycle_lr_with_adam(self):
    old_opt = self.opt
    self.opt = optim.Adam(
        [
            {"params": self.net.conv1.parameters()},
            {"params": self.net.conv2.parameters(), "lr": 0.5},
        ],
        lr=0.05,
    )
    lr_target = [1, 13, 25, 21.5, 18, 14.5, 11, 7.5, 4, 0.5]
    momentum_target = [22, 11.5, 1, 4, 7, 10, 13, 16, 19, 22]
    lr_targets = [lr_target, lr_target]
    momentum_targets = [momentum_target, momentum_target]
    scheduler = OneCycleLR(
        self.opt,
        max_lr=25,
        final_div_factor=2,
        base_momentum=1,
        max_momentum=22,
        total_steps=10,
        anneal_strategy="linear",
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, 10, use_beta1=True)
    self.opt = old_opt  # set optimizer back to SGD
|
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
# We limit our configs to CPU only, because we will be moving them to CUDA later
cpu_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
    "cpu", dtype, optim_info, skip=("differentiable",)
)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_lambda_lr
|
def test_lambda_lr(self):
    epochs = 10
    self.opt.param_groups[0]["lr"] = 0.05
    self.opt.param_groups[1]["lr"] = 0.4
    targets = [
        [0.05 * (0.9**x) for x in range(epochs)],
        [0.4 * (0.8**x) for x in range(epochs)],
    ]
    scheduler = LambdaLR(
        self.opt, lr_lambda=[lambda x1: 0.9**x1, lambda x2: 0.8**x2]
    )
    self._test(scheduler, targets, epochs)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_CosineAnnealingWarmRestarts_lr1
|
def test_CosineAnnealingWarmRestarts_lr1(self, T_mult):
    iters = 100
    eta_min = 1e-10
    T_i = 10
    T_cur = 0
    targets = [[0.05], [0.5]]
    scheduler = CosineAnnealingWarmRestarts(
        self.opt, T_0=T_i, T_mult=T_mult, eta_min=eta_min
    )
    for _ in range(1, iters, 1):
        T_cur += 1
        if T_cur >= T_i:
            T_cur = T_cur - T_i
            T_i = int(T_mult) * T_i
        targets[0] += [
            eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
        ]
        targets[1] += [
            eta_min + (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
        ]
    self._test(scheduler, targets, iters)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_CosineAnnealingWarmRestarts_lr2
|
def test_CosineAnnealingWarmRestarts_lr2(self):
    iters = 30
    eta_min = 1e-10
    T_mults = [1, 2, 4]
    for T_mult in T_mults:
        T_i = 10
        T_cur = 0
        targets = [[0.05], [0.5]]
        scheduler = CosineAnnealingWarmRestarts(
            self.opt, T_0=T_i, T_mult=T_mult, eta_min=eta_min
        )
        for _ in torch.arange(0.1, iters, 0.1):
            T_cur = round(T_cur + 0.1, 1)
            if T_cur >= T_i:
                T_cur = T_cur - T_i
                T_i = int(T_mult) * T_i
            targets[0] += [
                eta_min
                + (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
            ]
            targets[1] += [
                eta_min
                + (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
            ]
        self._test_CosineAnnealingWarmRestarts(scheduler, targets, iters)
|
# Needed for second order optims like LBFGS
closure_loss = torch.rand(1, device=device, dtype=dtype)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_CosineAnnealingWarmRestarts_lr3
|
def test_CosineAnnealingWarmRestarts_lr3(self):
    epochs_for_T_mults = [
        [0, 1, 2, 3, 4, 5, 12, 27, 3, 4, 5, 6, 13],
        [0, 1, 2, 3, 4, 5, 25, 32, 33, 34, 80, 81, 3],
        [0, 0.1, 0.2, 0.3, 1.3, 2.3, 17.5, 18.5, 19.5, 29.5, 30.5, 31.5, 50],
    ]
    T_curs_for_T_mults = [
        [1, 2, 3, 4, 5, 2, 7, 3, 4, 5, 6, 3],
        [1, 2, 3, 4, 5, 15, 2, 3, 4, 10, 11, 3],
        [0.1, 0.2, 0.3, 1.3, 2.3, 7.5, 8.5, 9.5, 19.5, 20.5, 21.5, 10],
    ]
    T_is_for_T_mults = [
        [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
        [10, 10, 10, 10, 10, 20, 40, 40, 40, 80, 80, 10],
        [10, 10, 10, 10, 10, 30, 30, 30, 30, 30, 30, 90],
    ]
    eta_min = 1e-10
    T_mults = [1, 2, 3]
    for epochs, T_mult, T_curs, T_is in zip(
        epochs_for_T_mults, T_mults, T_curs_for_T_mults, T_is_for_T_mults
    ):
        targets = [[0.05], [0.5]]
        scheduler = CosineAnnealingWarmRestarts(
            self.opt, T_0=10, T_mult=T_mult, eta_min=eta_min
        )
        for T_cur, T_i in zip(T_curs, T_is):
            targets[0] += [
                eta_min
                + (0.05 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
            ]
            targets[1] += [
                eta_min
                + (0.5 - eta_min) * (1 + math.cos(math.pi * T_cur / T_i)) / 2
            ]
        self._test_interleaved_CosineAnnealingWarmRestarts(
            scheduler, targets, epochs
        )
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
closure
|
def closure():
    return torch.tensor([10])
res1 = opt1.step(closure)
res2 = opt2.step(closure)
self.assertEqual(type(res1), type(res2))
|
def closure():
    optimizer.zero_grad()
    loss = (weight.mv(input) + bias).pow(2).sum()
    loss.backward()
    if optim_info.only_supports_sparse_grads:
        # For this test, we naively convert the Tensor layout, which we know does
        # NOT represent the expected use case for optims like SparseAdam!
        weight.grad = weight.grad.to_sparse()
        bias.grad = bias.grad.to_sparse()
    return loss
initial_value = closure().item()
for _ in range(20):
    if optim_info.step_requires_closure:
        loss = optimizer.step(closure)
    else:
        loss = closure()
        optimizer.step()
    for scheduler in schedulers:
        if isinstance(scheduler, ReduceLROnPlateau):
            scheduler.step(loss)
        else:
            scheduler.step()
if optim_input.kwargs.get("maximize", False):
    self.assertGreater(closure().item(), initial_value)
else:
    self.assertLess(closure().item(), initial_value)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_optim.py
|
test_swalr_no_anneal
|
def test_swalr_no_anneal(self):
    epochs, swa_start, swa_lr = 10, 5, 0.01
    initial_lrs = [group["lr"] for group in self.opt.param_groups]
    targets = [
        [lr] * (swa_start + 1) + [swa_lr] * (epochs - swa_start - 1)
        for lr in initial_lrs
    ]
    swa_scheduler = SWALR(self.opt, anneal_epochs=1, swa_lr=swa_lr)
    self._test_swalr(swa_scheduler, None, targets, swa_start, epochs)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_swalr_cosine_anneal_after_multiplicative
|
def test_swalr_cosine_anneal_after_multiplicative(self):
    # same swa_lr for different param_groups
    epochs, swa_start, swa_lr, anneal_epochs = 15, 5, 0.01, 5
    mult_factor = 0.9
    scheduler = MultiplicativeLR(self.opt, lr_lambda=lambda epoch: mult_factor)
    swa_scheduler = SWALR(self.opt, anneal_epochs=anneal_epochs, swa_lr=swa_lr)
    def anneal_coef(t):
        if t + 1 >= anneal_epochs:
            return 0.0
        return (1 + math.cos(math.pi * (t + 1) / anneal_epochs)) / 2
    initial_lrs = [group["lr"] for group in self.opt.param_groups]
    targets_before_swa = [
        [lr * mult_factor**i for i in range(swa_start + 1)] for lr in initial_lrs
    ]
    swa_epochs = epochs - swa_start - 1
    targets = [
        lrs
        + [
            lrs[-1] * anneal_coef(t) + swa_lr * (1 - anneal_coef(t))
            for t in range(swa_epochs)
        ]
        for lrs in targets_before_swa
    ]
    self._test_swalr(swa_scheduler, scheduler, targets, swa_start, epochs)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
anneal_coef
|
def anneal_coef(t):
    if t + 1 >= anneal_epochs:
        return 0.0
    return (1 + math.cos(math.pi * (t + 1) / anneal_epochs)) / 2
initial_lrs = [group["lr"] for group in self.opt.param_groups]
targets_before_swa = [
    [lr * mult_factor**i for i in range(swa_start + 1)] for lr in initial_lrs
]
swa_epochs = epochs - swa_start - 1
targets = [
    lrs
    + [
        lrs[-1] * anneal_coef(t) + swa_lr * (1 - anneal_coef(t))
        for t in range(swa_epochs)
    ]
    for lrs in targets_before_swa
]
self._test_swalr(swa_scheduler, scheduler, targets, swa_start, epochs)
|
for optim_input in cpu_optim_inputs:
    if (
        "fused" in optim_input.kwargs
        and "cuda" not in optim_info.supports_fused_on
    ):
        self.skipTest(
            f"cuda is not supported for fused on {optim_cls.__name__}"
        )
    params = [
        Parameter(torch.randn(2, 3, device="cpu", dtype=dtype))
        for _ in range(2)
    ]
    for p in params:
        p.grad = torch.randn_like(p)
        if optim_info.only_supports_sparse_grads:
            # For this test, we naively convert the Tensor layout, which we know does
            # NOT represent the expected use case for optims like SparseAdam!
            p.grad = p.grad.to_sparse()
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_swalr_linear_anneal_after_multiplicative
|
def test_swalr_linear_anneal_after_multiplicative(self):
    # separate swa_lr for different param_groups
    epochs, swa_start, swa_lrs, anneal_epochs = 15, 5, [0.01, 0.02], 4
    mult_factor = 0.9
    scheduler = MultiplicativeLR(self.opt, lr_lambda=lambda epoch: mult_factor)
    swa_scheduler = SWALR(
        self.opt,
        anneal_epochs=anneal_epochs,
        anneal_strategy="linear",
        swa_lr=swa_lrs,
    )
    def anneal_coef(t):
        if t + 1 >= anneal_epochs:
            return 0.0
        return 1 - (t + 1) / anneal_epochs
    initial_lrs = [group["lr"] for group in self.opt.param_groups]
    targets_before_swa = [
        [lr * mult_factor**i for i in range(swa_start + 1)] for lr in initial_lrs
    ]
    swa_epochs = epochs - swa_start - 1
    targets = [
        lrs
        + [
            lrs[-1] * anneal_coef(t) + swa_lr * (1 - anneal_coef(t))
            for t in range(swa_epochs)
        ]
        for lrs, swa_lr in zip(targets_before_swa, swa_lrs)
    ]
    self._test_swalr(swa_scheduler, scheduler, targets, swa_start, epochs)
|
optimizer = optim_cls(params, **optim_input.kwargs)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
_test_swalr
|
def _test_swalr(self, swa_scheduler, scheduler, targets, swa_start, epochs):
    for epoch in range(epochs):
        for param_group, target in zip(self.opt.param_groups, targets):
            self.assertEqual(
                target[epoch],
                param_group["lr"],
                msg="LR is wrong in epoch {}: expected {}, got {}".format(
                    epoch, target[epoch], param_group["lr"]
                ),
                atol=1e-5,
                rtol=0,
            )
        if epoch >= swa_start:
            self.opt.step()
            swa_scheduler.step()
        elif scheduler is not None:
            self.opt.step()
            scheduler.step()
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_swalr_hypers
|
def test_swalr_hypers(self):
    # Test that SWALR raises errors for incorrect hyper-parameters
    with self.assertRaisesRegex(ValueError, "anneal_strategy must"):
        swa_scheduler = SWALR(self.opt, anneal_strategy="exponential", swa_lr=1.0)
    with self.assertRaisesRegex(ValueError, "anneal_epochs must"):
        swa_scheduler = SWALR(self.opt, anneal_epochs=-1, swa_lr=1.0)
    with self.assertRaisesRegex(ValueError, "anneal_epochs must"):
        swa_scheduler = SWALR(self.opt, anneal_epochs=1.7, swa_lr=1.0)
    with self.assertRaisesRegex(ValueError, "swa_lr must"):
        swa_scheduler = SWALR(self.opt, swa_lr=[1.0, 0.1, 0.01])
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
post_hook
|
def post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data += 2
params = [torch.Tensor([1, 1])]
opt = SGD(params, lr=0.001)
data = 2
hook_handle = opt.register_step_post_hook(post_hook)
opt.step()
opt.step()
# check if post hooks were registered
self.assertEqual(data, 6)
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
opt.step()
self.assertEqual(data, 6)
|
def post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data += 2
params = [torch.tensor([1, 1], device=device, dtype=dtype)]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
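The record above tracks the rework of the per-optimizer post-hook test. A minimal, self-contained sketch of the same register_step_post_hook behaviour follows; the parameter, learning rate, and counter values are arbitrary choices for illustration, not taken from the record.

import torch
from torch.optim import SGD

calls = 0

def post_hook(optimizer, args, kwargs):
    # Called after every optimizer.step() with that step's args and kwargs.
    global calls
    calls += 1

param = torch.zeros(2, requires_grad=True)
opt = SGD([param], lr=0.1)
handle = opt.register_step_post_hook(post_hook)

opt.step()
opt.step()
assert calls == 2

handle.remove()  # after removal the hook no longer fires
opt.step()
assert calls == 2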
torch
|
test/test_optim.py
|
test_exp_step_lr_state_dict
|
def test_exp_step_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: ExponentialLR(self.opt, gamma=0.1),
lambda: ExponentialLR(self.opt, gamma=0.01),
)
|
params = [torch.tensor([1, 1], device=device, dtype=dtype)]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_out_dtype_op.py
|
f
|
def f(x, y):
return out_dtype(
torch.ops.aten.mul.Scalar, torch.int32, x, y
)
inp = (torch.randint(-128, 127, (5, 5), dtype=torch.int8), 3.0)
compiled = torch.compile(f, backend="eager", fullgraph=True)
self.assertTrue(torch.allclose(f(*inp), compiled(*inp)))
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
f
|
def f(x, y):
return out_dtype(
torch.ops.aten.mul.Scalar, torch.int32, x, y
)
inp = (torch.randint(-128, 127, (5, 5), dtype=torch.int8), 3.0)
compiled = torch.compile(f, backend="eager", fullgraph=True)
self.assertTrue(torch.allclose(f(*inp), compiled(*inp)))
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
test_out_dtype_non_functional
|
def test_out_dtype_non_functional(self):
class M(torch.nn.Module):
def forward(self, x, y):
return out_dtype(
torch.ops.aten.add_.Tensor, torch.int32, x, y
)
with self.assertRaisesRegex(ValueError, "out_dtype's first argument needs to be a functional operator"):
_ = torch.export.export(
M(), (torch.randint(-128, 127, (5, 5), dtype=torch.int8), torch.randint(-128, 127, (5, 5), dtype=torch.int8)),
)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
@unittest.skipIf(not torch._dynamo.is_dynamo_supported(), "dynamo isn't supported")
class TestOutDtypeOp(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
forward
|
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
class M(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
f
|
def f(x, y):
return out_dtype(
torch.ops.aten.mul.Scalar, torch.int32, x, y
)
inp = (torch.randint(-128, 127, (5, 5), dtype=torch.int8), 3.0)
compiled = torch.compile(f, backend="eager", fullgraph=True)
self.assertTrue(torch.allclose(f(*inp), compiled(*inp)))
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
f
|
def f(x, y):
return out_dtype(
torch.ops.aten.mul.Scalar, torch.int32, x, y
)
inp = (torch.randint(-128, 127, (5, 5), dtype=torch.int8), 3.0)
compiled = torch.compile(f, backend="eager", fullgraph=True)
self.assertTrue(torch.allclose(f(*inp), compiled(*inp)))
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
func
|
def func(x, w):
return out_dtype(torch.ops.aten.mm.default, torch.int32, x, w)
w = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
x = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
ref = torch._int_mm(x, w)
test_out = func(x, w)
func_comp = torch.compile(func, fullgraph=True, mode="max-autotune")
test_out_c = func_comp(x, w)
self.assertTrue(torch.allclose(ref, test_out))
self.assertTrue(torch.allclose(ref, test_out_c))
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
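The out_dtype records above all wrap torch.ops.aten.mm.default so that int8 operands produce an int32 result. Below is a minimal eager-mode sketch of that behaviour, assuming the cast-then-compute semantics the numerical test in these records exercises; the shapes and the CPU device are arbitrary choices.

import torch
from torch._higher_order_ops.out_dtype import out_dtype

x = torch.randint(-128, 127, (4, 4), dtype=torch.int8)
w = torch.randint(-128, 127, (4, 4), dtype=torch.int8)

# out_dtype promotes the int8 operands and performs the matmul in int32.
out = out_dtype(torch.ops.aten.mm.default, torch.int32, x, w)

ref = torch.mm(x.to(torch.int32), w.to(torch.int32))
assert out.dtype == torch.int32
assert torch.equal(out, ref)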
torch
|
test/test_out_dtype_op.py
|
func
|
def func(x, w):
return out_dtype(torch.ops.aten.mm.default, torch.int32, x, w)
w = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
x = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
ref = torch._int_mm(x, w)
test_out = func(x, w)
func_comp = torch.compile(func, fullgraph=True, mode="max-autotune")
test_out_c = func_comp(x, w)
self.assertTrue(torch.allclose(ref, test_out))
self.assertTrue(torch.allclose(ref, test_out_c))
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
forward
|
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
class M(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
func
|
def func(x, w):
return out_dtype(torch.ops.aten.mm.default, torch.int32, x, w)
w = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
x = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
ref = torch._int_mm(x, w)
test_out = func(x, w)
func_comp = torch.compile(func, fullgraph=True, mode="max-autotune")
test_out_c = func_comp(x, w)
self.assertTrue(torch.allclose(ref, test_out))
self.assertTrue(torch.allclose(ref, test_out_c))
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_optim.py
|
test_cosine_lr_state_dict
|
def test_cosine_lr_state_dict(self):
epochs = 10
eta_min = 1e-10
self._check_scheduler_state_dict(
lambda: CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min),
lambda: CosineAnnealingLR(self.opt, T_max=epochs // 2, eta_min=eta_min / 2),
epochs=epochs,
)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
dummy_closure
|
def dummy_closure():
return 1
closure = dummy_closure if optim_info.step_requires_closure else None
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
optim = optim_info.optim_cls(params, **optim_input.kwargs)
data = 2
hook_handle = optim.register_step_post_hook(post_hook)
optim.step(closure)
optim.step(closure)
# check if post hooks were registered
self.assertEqual(data, 6)
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
optim.step(closure)
self.assertEqual(data, 6)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_optim.py
|
test_reduce_lr_on_plateau_state_dict
|
def test_reduce_lr_on_plateau_state_dict(self):
scheduler = ReduceLROnPlateau(self.opt, mode="min", factor=0.1, patience=2)
for score in [1.0, 2.0, 3.0, 4.0, 3.0, 4.0, 5.0, 3.0, 2.0, 1.0]:
scheduler.step(score)
scheduler_copy = ReduceLROnPlateau(
self.opt, mode="max", factor=0.5, patience=10
)
scheduler_copy.load_state_dict(scheduler.state_dict())
for key in scheduler.__dict__.keys():
if key not in {"optimizer", "is_better"}:
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_lambda_lr_state_dict_fn
|
def test_lambda_lr_state_dict_fn(self):
scheduler = LambdaLR(self.opt, lr_lambda=lambda x: x)
state = scheduler.state_dict()
self.assertIsNone(state["lr_lambdas"][0])
scheduler_copy = LambdaLR(self.opt, lr_lambda=lambda x: x)
scheduler_copy.load_state_dict(state)
for key in scheduler.__dict__.keys():
if key not in {"optimizer", "lr_lambdas"}:
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_lambda_lr_state_dict_obj
|
def test_lambda_lr_state_dict_obj(self):
scheduler = LambdaLR(self.opt, lr_lambda=LambdaLRTestObject(10))
state = scheduler.state_dict()
self.assertIsNotNone(state["lr_lambdas"][0])
scheduler_copy = LambdaLR(self.opt, lr_lambda=LambdaLRTestObject(-1))
scheduler_copy.load_state_dict(state)
for key in scheduler.__dict__.keys():
if key not in {"optimizer"}:
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_CosineAnnealingWarmRestarts_lr_state_dict
|
def test_CosineAnnealingWarmRestarts_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: CosineAnnealingWarmRestarts(self.opt, T_0=10, T_mult=2),
lambda: CosineAnnealingWarmRestarts(self.opt, T_0=100),
)
|
closure = dummy_closure if optim_info.step_requires_closure else None
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_swa_lr_state_dict
|
def test_swa_lr_state_dict(self):
self._check_scheduler_state_dict(
lambda: SWALR(self.opt, anneal_epochs=3, swa_lr=0.5),
lambda: SWALR(
self.opt, anneal_epochs=10, anneal_strategy="linear", swa_lr=5.0
),
)
|
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
optim = optim_info.optim_cls(params, **optim_input.kwargs)
data = 2
hook_handle = optim.register_step_post_hook(post_hook)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
_check_scheduler_state_dict
|
def _check_scheduler_state_dict(self, constr, constr2, epochs=10):
scheduler = constr()
for _ in range(epochs):
scheduler.optimizer.step()
scheduler.step()
scheduler_copy = constr2()
scheduler_copy.load_state_dict(scheduler.state_dict())
for key in scheduler.__dict__.keys():
if key != "optimizer":
self.assertEqual(scheduler.__dict__[key], scheduler_copy.__dict__[key])
self.assertEqual(scheduler.get_last_lr(), scheduler_copy.get_last_lr())
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
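_check_scheduler_state_dict above drives one scheduler, restores its state into a second instance built with different hyper-parameters, and compares the two. A small sketch of that round trip follows; StepLR and the hyper-parameter values are arbitrary choices for the example.

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

opt = SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
sched = StepLR(opt, step_size=3, gamma=0.5)
for _ in range(5):
    opt.step()
    sched.step()

# Deliberately different constructor arguments; load_state_dict overwrites them.
restored = StepLR(opt, step_size=100, gamma=0.9)
restored.load_state_dict(sched.state_dict())

assert restored.get_last_lr() == sched.get_last_lr()
assert restored.step_size == sched.step_size and restored.gamma == sched.gamma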
torch
|
test/test_optim.py
|
_test_get_last_lr
|
def _test_get_last_lr(self, schedulers, targets, epochs=10):
if isinstance(schedulers, LRScheduler):
schedulers = [schedulers]
optimizers = {scheduler.optimizer for scheduler in schedulers}
for epoch in range(epochs):
result = [scheduler.get_last_lr() for scheduler in schedulers]
[optimizer.step() for optimizer in optimizers]
[scheduler.step() for scheduler in schedulers]
target = [[t[epoch] for t in targets]] * len(schedulers)
for t, r in zip(target, result):
self.assertEqual(
target,
result,
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, t, r
),
atol=1e-5,
rtol=0,
)
|
optim.step(closure)
optim.step(closure)
# check if post hooks were registered
self.assertEqual(data, 6)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
_test_with_epoch
|
def _test_with_epoch(self, schedulers, targets, epochs=10):
if isinstance(schedulers, LRScheduler):
schedulers = [schedulers]
optimizers = {scheduler.optimizer for scheduler in schedulers}
for epoch in range(epochs):
[optimizer.step() for optimizer in optimizers]
with warnings.catch_warnings(record=True) as w:
[
scheduler.step(epoch) for scheduler in schedulers
] # step before assert: skip initial lr
self._check_warning_is_epoch_deprecation_warning(
w, num_warnings=len(schedulers)
)
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(
target[epoch],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, target[epoch], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_non_empty_state
|
if __name__ == "__main__":
run_tests()
|
def test_non_empty_state(self, device, dtype, optim_info):
# There are internal tests that check that the state is not empty
optim_cls = optim_info.optim_cls
model = torch.nn.Linear(5, 5)
model.to(dtype=dtype, device=device)
inpt = torch.rand(2, 5, dtype=dtype, device=device)
for optim_input in optim_info.optim_inputs_func(device=device):
optim = optim_cls(model.parameters(), **optim_input.kwargs)
optim.zero_grad()
output = model(inpt)
loss = output.sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
for param in model.parameters():
if param.grad is not None:
param.grad = param.grad.to_sparse()
if optim_info.step_requires_closure:
optim.step(lambda: 1.0)
else:
optim.step()
for state in optim.state.values():
self.assertGreater(len(state), 0)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
@markDynamoStrictTest
class TestOptimRenewed(TestCase):
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
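test_non_empty_state above asserts that an optimizer leaves per-parameter state behind after a step. A compact sketch of the same check follows; plain SGD with momentum is an arbitrary choice that guarantees a momentum buffer is stored.

import torch

model = torch.nn.Linear(5, 5)
opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

loss = model(torch.rand(2, 5)).sum()
loss.backward()
opt.step()

# Each parameter that received a gradient now has state (the momentum buffer).
assert len(opt.state) > 0
for state in opt.state.values():
    assert len(state) > 0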
torch
|
test/test_out_dtype_op.py
|
test_out_dtype_make_fx
|
def test_out_dtype_make_fx(self):
class M(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
m = M(weight)
x = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
gm = make_fx(m)(x)
self.assertTrue(torch.allclose(m(x), gm(x)))
gm = make_fx(torch.func.functionalize(M(weight)))(x)
self.assertTrue(torch.allclose(m(x), gm(x)))
FileCheck().check("torch.ops.higher_order.out_dtype").check("aten.mm.default").run(gm.code)
self.assertTrue(torch.allclose(m(x), gm(x)))
for node in gm.graph.nodes:
if node.op == "call_function" and node.target is out_dtype:
# Result of this node should be int32
self.assertEqual(node.meta["val"].dtype, torch.int32)
# Argument of this node should be int8
self.assertEqual(node.args[2].meta["val"].dtype, torch.int8)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
@unittest.skipIf(not torch._dynamo.is_dynamo_supported(), "dynamo isn't supported")
class TestOutDtypeOp(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
__init__
|
def __init__(self, weight):
super().__init__()
self.weight = weight
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
class M(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
forward
|
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
class M(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
test_out_dtype_op_functional
|
def test_out_dtype_op_functional(self):
class M(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
m = M(weight)
x = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
ep = torch.export.export(
m,
(x,),
)
FileCheck().check("torch.ops.higher_order.out_dtype").check("aten.mm.default").run(ep.graph_module.code)
self.assertTrue(torch.allclose(m(x), ep.module()(x)))
for node in ep.graph.nodes:
if node.op == "call_function" and node.target is out_dtype:
# Result of this node should be int32
self.assertEqual(node.meta["val"].dtype, torch.int32)
# Argument of this node should be int8
self.assertEqual(node.args[2].meta["val"].dtype, torch.int8)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
@unittest.skipIf(not torch._dynamo.is_dynamo_supported(), "dynamo isn't supported")
class TestOutDtypeOp(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
__init__
|
def __init__(self, weight):
super().__init__()
self.weight = weight
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
class M(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
forward
|
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
class M(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
test_out_dtype_mm_numerical
|
def test_out_dtype_mm_numerical(self):
class M(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
m = M(weight)
x = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
gm = make_fx(m)(x)
x_casted = x.to(torch.int32)
weight_casted = weight.to(torch.int32)
numerical_res = torch.ops.aten.mm.default(x_casted, weight_casted)
self.assertTrue(torch.allclose(numerical_res, gm(x)))
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
@unittest.skipIf(not torch._dynamo.is_dynamo_supported(), "dynamo isn't supported")
class TestOutDtypeOp(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
__init__
|
def __init__(self, weight):
super().__init__()
self.weight = weight
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
class M(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
forward
|
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
class M(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_optim.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class SchedulerTestNet(torch.nn.Module):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
local_post_hook
|
def local_post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data.append(2)
params = [torch.Tensor([1, 1])]
opt1 = SGD(params, lr=0.001)
opt2 = Adam(params, lr=0.01)
data = []
# register global hooks to both optimizers
global_pre_handle = register_optimizer_step_pre_hook(global_pre_hook)
global_post_handle = register_optimizer_step_post_hook(global_post_hook)
# register local hooks
first_pre_handle = opt1.register_step_pre_hook(local_pre_hook)
first_post_handle = opt1.register_step_post_hook(local_post_hook)
second_pre_handle = opt2.register_step_pre_hook(local_pre_hook)
second_post_handle = opt2.register_step_post_hook(local_post_hook)
opt1.step()
self.assertListEqual(data, [0, 1, 2, 5])
opt2.step()
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5])
opt1.step()
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
# remove all hooks
global_pre_handle.remove()
global_post_handle.remove()
first_pre_handle.remove()
first_post_handle.remove()
second_pre_handle.remove()
second_post_handle.remove()
opt1.step()
opt2.step()
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
|
def local_post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data.append(2)
params = [torch.tensor([1, 1], device=device, dtype=dtype)]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
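The record above mixes the module-level register_optimizer_step_post_hook with per-optimizer hooks. Below is a minimal ordering sketch; the event labels are arbitrary, and the assertion mirrors the [..., 2, 5] ordering checked in the record (per-optimizer post-hooks fire before the global ones).

import torch
from torch.optim import SGD
from torch.optim.optimizer import register_optimizer_step_post_hook

events = []

def global_post(optimizer, args, kwargs):
    events.append("global")

def local_post(optimizer, args, kwargs):
    events.append("local")

opt = SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
global_handle = register_optimizer_step_post_hook(global_post)
local_handle = opt.register_step_post_hook(local_post)

opt.step()
assert events == ["local", "global"]

# Remove both handles so the global hook does not leak into other optimizers.
global_handle.remove()
local_handle.remove()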
torch
|
test/test_optim.py
|
compute_preactivation
|
def compute_preactivation(self, x):
return self.fc1(x)
|
params = [torch.tensor([1, 1], device=device, dtype=dtype)]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
class SWATestDNN(torch.nn.Module):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
forward
|
def forward(self, x):
return self.conv2(F.relu(self.conv1(x)))
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class SchedulerTestNet(torch.nn.Module):
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
__init__
|
def __init__(self):
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class SchedulerTestNet(torch.nn.Module):
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
compute_preactivation
|
def compute_preactivation(self, x):
return self.fc1(x)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
class SWATestDNN(torch.nn.Module):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
forward
|
def forward(self, x):
return self.conv2(F.relu(self.conv1(x)))
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class SchedulerTestNet(torch.nn.Module):
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
_test_averaged_model
|
def _test_averaged_model(self, net_device, swa_device):
dnn = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3),
torch.nn.ReLU(),
torch.nn.MaxPool2d(kernel_size=2),
torch.nn.BatchNorm2d(5, momentum=0.3),
torch.nn.Conv2d(5, 2, kernel_size=3),
torch.nn.ReLU(),
torch.nn.Linear(5, 5),
torch.nn.ReLU(),
torch.nn.Linear(5, 10),
).to(net_device)
averaged_dnn = AveragedModel(dnn, device=swa_device)
averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
n_updates = 10
for i in range(n_updates):
for p, p_avg in zip(dnn.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
p_avg += p.detach() / n_updates
averaged_dnn.update_parameters(dnn)
for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
self.assertEqual(p_avg, p_swa)
# Check that AveragedModel is on the correct device
self.assertTrue(p_swa.device == swa_device)
self.assertTrue(p.device == net_device)
self.assertTrue(averaged_dnn.n_averaged.device == swa_device)
|
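For reference, a minimal self-contained sketch (not taken from the recorded test data) of the invariant the averaging test above checks: with its default averaging function, AveragedModel keeps a uniform running mean, so after n updates each averaged parameter equals the arithmetic mean of the n observed snapshots.
import torch
from torch.optim.swa_utils import AveragedModel

net = torch.nn.Linear(4, 4)
swa_net = AveragedModel(net)  # maintains a running mean of net's parameters
manual_avg = [torch.zeros_like(p) for p in net.parameters()]
n_updates = 5
for _ in range(n_updates):
    for p, avg in zip(net.parameters(), manual_avg):
        p.detach().add_(torch.randn_like(p))  # perturb the live model in place
        avg += p.detach() / n_updates         # accumulate the plain arithmetic mean
    swa_net.update_parameters(net)            # fold the current snapshot into the average
for avg, p_swa in zip(manual_avg, swa_net.parameters()):
    assert torch.allclose(avg, p_swa)         # running mean matches the plain mean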
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
class TestSWAUtils(TestCase):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_averaged_model_all_devices
|
def test_averaged_model_all_devices(self):
cpu = torch.device("cpu")
self._test_averaged_model(cpu, cpu)
if torch.cuda.is_available():
cuda = torch.device(0)
self._test_averaged_model(cuda, cpu)
self._test_averaged_model(cpu, cuda)
self._test_averaged_model(cuda, cuda)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
class TestSWAUtils(TestCase):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_adamax
|
def test_adamax(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adamax(
[weight, bias], lr=1e-1, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adamax(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adamax(
[weight, bias],
lr=1e-1,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_complex_2d(optim.Adamax)
self._test_complex_2d(functools.partial(optim.Adamax, foreach=True))
with self.assertRaisesRegex(
ValueError, "Invalid beta parameter at index 1: 1.0"
):
optim.Adamax(None, lr=1e-2, betas=(0.0, 1.0))
|
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_rprop
|
def test_rprop(self):
is_cuda_sm86 = torch.cuda.is_available() and torch.cuda.get_device_capability(
0
) == (8, 6)
for foreach in (False, True):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Rprop(
[weight, bias], lr=2e-4, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Rprop(
self._build_params_dict(weight, bias, lr=1e-2),
lr=2e-4,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
atol=4e-5 if is_cuda_sm86 else None,
rtol=3e-5 if is_cuda_sm86 else None,
)
self._test_complex_2d(lambda param: optim.Rprop(param, foreach=foreach))
self._test_complex_optimizer(
lambda param: optim.Rprop([param], lr=0.001, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.Rprop(
[param], lr=0.001, maximize=True, foreach=foreach
)
)
with self.assertRaisesRegex(ValueError, "Invalid eta values: 1.0, 0.5"):
optim.Rprop(None, lr=1e-2, etas=(1.0, 0.5), foreach=foreach)
|
self.assertEqual(
getPublicAttrs(optimizer), getPublicAttrs(deepcopy(optimizer))
)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_adamw
|
def test_adamw(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
[weight, bias],
lr=1e-3,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.AdamW(
[weight, bias],
lr=1e-3,
weight_decay=1,
amsgrad=True,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_complex_2d(optim.AdamW)
self._test_complex_2d(functools.partial(optim.AdamW, foreach=True))
with self.assertRaisesRegex(ValueError, "Invalid weight_decay value: -1"):
optim.AdamW(None, lr=1e-2, weight_decay=-1)
|
@optims(
[optim for optim in optim_db if optim.step_requires_closure],
dtypes=[torch.float32],
)
def test_second_order_optims_return_consistent_types(
self, device, dtype, optim_info
):
# Motivated by #7586
optim_cls = optim_info.optim_cls
params = [
torch.randn(10, 5, device=device, dtype=dtype),
torch.randn(10, device=device, dtype=dtype),
]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
closure
|
def closure():
return torch.tensor([10])
res1 = opt1.step(closure)
res2 = opt2.step(closure)
self.assertEqual(type(res1), type(res2))
|
def closure():
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
if optim_info.step_requires_closure:
loss = optimizer.step(closure)
else:
loss = closure()
optimizer.step()
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
if optim_input.kwargs.get("maximize", False):
self.assertGreater(closure().item(), initial_value)
else:
self.assertLess(closure().item(), initial_value)
|
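As context for the closure-driven loop above, here is a hedged standalone sketch of the pattern for optimizers whose step requires a closure (LBFGS is the usual example): the closure zeroes gradients, recomputes the loss, runs backward, and returns the loss so step() can re-evaluate it as often as it needs. The tensors and learning rate below are illustrative choices, not values from the test harness.
import torch

weight = torch.randn(3, 2, requires_grad=True)
bias = torch.randn(3, requires_grad=True)
inp = torch.randn(2)
optimizer = torch.optim.LBFGS([weight, bias], lr=1e-1)

def closure():
    optimizer.zero_grad()
    loss = (weight.mv(inp) + bias).pow(2).sum()
    loss.backward()
    return loss  # step() inspects the returned loss

for _ in range(5):
    optimizer.step(closure)  # LBFGS may call closure() several times per step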
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_optim.py
|
test_nadam
|
def test_nadam(self):
self._test_basic_cases(
lambda weight, bias, foreach: optim.NAdam(
[weight, bias], lr=1e-3, foreach=foreach
),
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, foreach: optim.NAdam(
self._build_params_dict(weight, bias, lr=1e-2), lr=1e-3, foreach=foreach
),
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, foreach: optim.NAdam(
[weight, bias],
lr=1e-3,
weight_decay=0.1,
momentum_decay=6e-3,
foreach=foreach,
),
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, foreach: optim.NAdam(
[weight, bias],
lr=1e-3,
weight_decay=0.1,
momentum_decay=6e-3,
foreach=foreach,
),
[lambda opt: ExponentialLR(opt, gamma=0.9)],
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(
ValueError, "Invalid beta parameter at index 0: 1.0"
):
optim.NAdam(None, lr=1e-2, betas=(1.0, 0.0))
with self.assertRaisesRegex(ValueError, "Invalid momentum_decay value: -0.2"):
optim.NAdam(None, lr=1e-2, momentum_decay=-0.2)
|
torch.manual_seed(1)
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid(),
)
model.to(dtype=dtype, device=dev)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_bn_update_eval_momentum
|
def test_bn_update_eval_momentum(self):
# check that update_bn preserves eval mode
objects = 100
input_channels = 3
height, width = 5, 5
x = torch.rand(objects, input_channels, height, width)
ds_x = torch.utils.data.TensorDataset(x)
dl_x = torch.utils.data.DataLoader(ds_x, batch_size=5, shuffle=True)
dnn = SWATestCNN(input_channels=input_channels)
dnn.eval()
update_bn(dl_x, dnn)
self.assertFalse(dnn.training)
# check that momentum is preserved
self.assertEqual(dnn.bn.momentum, 0.3)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
class TestSWAUtils(TestCase):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
_diff_fn
|
def _diff_fn(p, grad, opt_differentiable_state, opt_class, kwargs, *ignored):
    # `ignored` is the list of values in `opt_differentiable_state`; we pass it so that
    # `gradcheck` correctly tracks the state tensors as function inputs,
    # because otherwise it can't unpack the values in the `opt_differentiable_state` dict
p = p.clone()
p.grad = grad
opt_differentiable_state = {
k: v.clone() if isinstance(v, torch.Tensor) else v
for k, v in opt_differentiable_state.items()
}
opt = opt_class([p], **kwargs)
opt.state[p].update(opt_differentiable_state)
opt.step()
return (p,) + tuple(
v
for v in opt.state[p].values()
if isinstance(v, torch.Tensor) and v.requires_grad
)
class TestDifferentiableOptimizer(TestCase):
def test_sgd(self):
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
mbuff = torch.rand(10, requires_grad=True, dtype=torch.float64)
state = {"momentum_buffer": mbuff}
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.SGD,
{"lr": 0.9, "differentiable": True},
*state.values(),
),
)
def test_adam(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.Adam,
{"lr": 0.9, "differentiable": True, "amsgrad": True},
*state.values(),
),
)
def test_rmsprop(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["step"] = 0
state["square_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["momentum_buffer"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
# This can cause issues with large values and nan due to sqrt ops
state["grad_avg"] = 1e-2 * torch.rand(
10, requires_grad=True, dtype=torch.float64
)
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.RMSprop,
{
"lr": 0.9,
"maximize": True,
"momentum": 0.9,
"differentiable": True,
"centered": True,
"weight_decay": 0.1,
},
*state.values(),
),
)
def test_adadelta(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["square_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["acc_delta"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.Adadelta,
{"lr": 0.9, "weight_decay": 0.1, "differentiable": True},
*state.values(),
),
)
def test_adagrad(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["sum"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.Adagrad,
{"lr": 0.9, "weight_decay": 0.1, "differentiable": True},
*state.values(),
),
)
def test_adamax(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_inf"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.Adamax,
{"lr": 0.9, "weight_decay": 0.1, "differentiable": True},
*state.values(),
),
)
@skipIfTorchDynamo("The inplace mu update fails with dynamo, "
"since this is only happening when differentiable is enabled, skipping for now")
def test_asgd(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
    # `step`, `eta` & `mu` are not continuous variables (even though we define them as floats)
    # and so they shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["eta"] = torch.tensor(0.9, requires_grad=False, dtype=torch.float64)
state["mu"] = torch.tensor(1.0, requires_grad=False, dtype=torch.float64)
state["ax"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.ASGD,
{"lr": 0.9, "differentiable": True},
*state.values(),
),
)
def test_rprop(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["prev"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["step_size"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.Rprop,
{"lr": 0.9, "differentiable": True},
*state.values(),
),
)
def test_adamw(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["max_exp_avg_sq"] = torch.rand(
10, requires_grad=True, dtype=torch.float64
)
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.AdamW,
{"lr": 0.9, "differentiable": True, "amsgrad": True},
*state.values(),
),
)
def test_nadam(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["mu_product"] = torch.tensor(1.0, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.NAdam,
{"lr": 0.9, "differentiable": True},
*state.values(),
),
)
def test_radam(self):
state = {}
p = torch.rand(10, requires_grad=True, dtype=torch.float64)
grad = torch.rand(10, requires_grad=True, dtype=torch.float64)
# `step` is not a continuous variable (even though we define it as a float)
# and so it shouldn't require gradients.
state["step"] = torch.tensor(10.0, requires_grad=False, dtype=torch.float64)
state["exp_avg"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
state["exp_avg_sq"] = torch.rand(10, requires_grad=True, dtype=torch.float64)
gradcheck(
_diff_fn,
(
p,
grad,
state,
torch.optim.RAdam,
{"lr": 0.9, "differentiable": True},
*state.values(),
),
)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_defaults_changed_to_foreach(self):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
multi_optims = ((optim.Adam, adam, "_multi_tensor_adam"),
(optim.AdamW, adamw, "_multi_tensor_adamw"),
(optim.NAdam, nadam, "_multi_tensor_nadam"),
(optim.SGD, sgd, "_multi_tensor_sgd"),
(optim.RAdam, radam, "_multi_tensor_radam"),
(optim.RMSprop, rmsprop, "_multi_tensor_rmsprop"),
(optim.Rprop, rprop, "_multi_tensor_rprop"),
(optim.ASGD, asgd, "_multi_tensor_asgd"),
(optim.Adamax, adamax, "_multi_tensor_adamax"),
(optim.Adadelta, adadelta, "_multi_tensor_adadelta"),
(optim.Adagrad, adagrad, "_multi_tensor_adagrad"),)
model = torch.nn.Linear(5, 5)
model.to(dtype=torch.float64, device="cuda")
input = torch.rand(2, 5, dtype=torch.float64, device="cuda")
for opt, mod, func in multi_optims:
defaults = {}
if opt == optim.SGD:
defaults["lr"] = 1e-2
optimizer = opt(model.parameters(), **defaults)
optimizer.zero_grad()
output = model(input)
loss = output.sum()
loss.backward()
with patch.object(mod, func) as mocked_foreach_impl:
optimizer.step()
self.assertTrue(mocked_foreach_impl.called)
if __name__ == "__main__":
run_tests()
|
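A small hypothetical illustration (module and function names are invented here, not PyTorch internals) of the patch.object check used in test_defaults_changed_to_foreach above: temporarily replace a module-level implementation function and assert that the code path under test dispatched to it.
import types
from unittest.mock import patch

impl = types.ModuleType("impl")                # stand-in for a module such as torch.optim.adam
impl._multi_tensor_step = lambda params: None  # stand-in for a foreach implementation

def run_step(params):
    impl._multi_tensor_step(params)            # the dispatch we want to observe

with patch.object(impl, "_multi_tensor_step") as mocked_impl:
    run_step([1, 2, 3])
assert mocked_impl.called                      # confirms the multi-tensor path was taken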
optim.step(closure)
self.assertListEqual(data, [0, 1, 2, 5])
optim2.step(closure)
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5])
optim.step(closure)
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
# remove all hooks
global_pre_handle.remove()
global_post_handle.remove()
first_pre_handle.remove()
first_post_handle.remove()
second_pre_handle.remove()
second_post_handle.remove()
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_sgd
|
def test_sgd(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
self._build_params_dict_single(weight, bias, lr=1e-2),
lr=1e-3,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
self._build_params_dict_single(weight, bias, lr=1e-2),
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[lambda opt: StepLR(opt, gamma=0.9, step_size=10)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: LinearLR(
opt, start_factor=0.4, end_factor=0.8, total_iters=4
)
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: LinearLR(
opt, start_factor=0.4, end_factor=0.6, total_iters=4
),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[
lambda opt: StepLR(opt, gamma=0.99, step_size=10),
lambda opt: ExponentialLR(opt, gamma=0.99),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias],
lr=1e-3,
momentum=0.5,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias],
lr=1e-3,
momentum=0.5,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias],
nesterov=True,
lr=1e-3,
momentum=0.5,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.SGD(
[weight, bias], lr=1e-3, maximize=maximize, foreach=foreach
),
[lambda opt: PolynomialLR(opt, power=0.9, total_iters=4)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(ValueError, "Invalid momentum value: -0.5"):
optim.SGD(None, lr=1e-2, momentum=-0.5)
|
optim.step(closure)
optim2.step(closure)
self.assertListEqual(data, [0, 1, 2, 5, 0, 1, 2, 5, 0, 1, 2, 5])
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_rmsprop
|
def test_rmsprop(self):
for foreach in (False, True):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
[weight, bias], lr=1e-2, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
centered=True,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
centered=True,
momentum=0.1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
momentum=0.1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.RMSprop(
self._build_params_dict(weight, bias, lr=1e-3),
lr=1e-2,
momentum=0.1,
weight_decay=1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_complex_2d(lambda param: optim.RMSprop(param, foreach=foreach))
self._test_complex_2d(
lambda param: optim.RMSprop(param, centered=True, foreach=foreach)
)
self._test_complex_2d(
lambda param: optim.RMSprop(param, momentum=0.1, foreach=foreach)
)
self._test_complex_2d(
lambda param: optim.RMSprop(param, maximize=True, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], centered=True, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], momentum=0.1, foreach=foreach)
)
self._test_complex_optimizer(
lambda param: optim.RMSprop([param], maximize=True, foreach=foreach)
)
with self.assertRaisesRegex(ValueError, "Invalid momentum value: -1.0"):
optim.RMSprop(None, lr=1e-2, momentum=-1.0, foreach=foreach)
|
params = [
Parameter(torch.randn(2, 3, device=device, dtype=dtype)) for _ in range(2)
]
for p in params:
p.grad = torch.rand_like(p)
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
p.grad = p.grad.to_sparse()
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
test_adadelta
|
def test_adadelta(self):
# Handles https://github.com/pytorch/pytorch/issues/69698
self.rel_tol = 4e-3
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
[weight, bias], maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
self._build_params_dict(weight, bias, rho=0.95),
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
self._build_params_dict(weight, bias, rho=0.95),
maximize=maximize,
foreach=foreach,
),
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: ReduceLROnPlateau(opt),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adadelta(
[weight, bias], weight_decay=1, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(ValueError, "Invalid rho value: 1.1"):
optim.Adadelta(None, lr=1e-2, rho=1.1)
|
# Needed for second order optims like LBFGS
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
closure
|
def closure():
return torch.tensor([10])
res1 = opt1.step(closure)
res2 = opt2.step(closure)
self.assertEqual(type(res1), type(res2))
|
def closure():
optimizer.zero_grad()
loss = (weight.mv(input) + bias).pow(2).sum()
loss.backward()
if optim_info.only_supports_sparse_grads:
# For this test, we naively convert the Tensor layout, which we know does
# NOT represent the expected use case for optims like SparseAdam!
weight.grad = weight.grad.to_sparse()
bias.grad = bias.grad.to_sparse()
return loss
initial_value = closure().item()
for _ in range(20):
if optim_info.step_requires_closure:
loss = optimizer.step(closure)
else:
loss = closure()
optimizer.step()
for scheduler in schedulers:
if isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
else:
scheduler.step()
if optim_input.kwargs.get("maximize", False):
self.assertGreater(closure().item(), initial_value)
else:
self.assertLess(closure().item(), initial_value)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import weakref
import types
from torch.nn import Parameter
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_optim.py
|
test_adagrad
|
def test_adagrad(self):
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
[weight, bias], lr=1e-1, maximize=maximize, foreach=foreach
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
[weight, bias],
lr=1e-1,
initial_accumulator_value=0.1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
[lambda opt: ReduceLROnPlateau(opt)],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
self._test_basic_cases(
lambda weight, bias, maximize, foreach: optim.Adagrad(
self._build_params_dict(weight, bias, lr=1e-2),
lr=1e-1,
maximize=maximize,
foreach=foreach,
),
[
lambda opt: ReduceLROnPlateau(opt),
lambda opt: ExponentialLR(opt, gamma=0.99),
],
constructor_accepts_maximize=True,
constructor_accepts_foreach=True,
)
with self.assertRaisesRegex(ValueError, "Invalid lr_decay value: -0.5"):
optim.Adagrad(None, lr=1e-2, lr_decay=-0.5)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
class TestOptim(TestCase):
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
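The deleted test_adagrad above exercises the maximize/foreach constructor flags and the lr_decay validation. A minimal stand-alone sketch of the same torch.optim.Adagrad API (the toy parameter tensor is an assumption for illustration):

import torch
from torch import optim

params = [torch.nn.Parameter(torch.randn(2, 2))]

# constructor flags mirror the cases above: maximize/foreach are plain kwargs
opt = optim.Adagrad(params, lr=1e-1, initial_accumulator_value=0.1,
                    maximize=False, foreach=True)

# a negative lr_decay is rejected at construction time
try:
    optim.Adagrad(params, lr=1e-2, lr_decay=-0.5)
except ValueError as err:
    print(err)  # Invalid lr_decay value: -0.5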
torch
|
test/test_optim.py
|
getPublicAttrs
|
def getPublicAttrs(obj):
return {k for k in obj.__dict__ if not k.startswith("_")}
for optim_input in all_optim_inputs:
optimizer = optim_cls(params, **optim_input.kwargs)
# Make some state
for _ in range(3):
if optim_info.step_requires_closure:
optimizer.step(closure)
else:
closure()
optimizer.step()
self.assertEqual(
getPublicAttrs(optimizer), getPublicAttrs(deepcopy(optimizer))
)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_optim.py
|
_test
|
def _test(self, schedulers, targets, epochs=10):
if isinstance(schedulers, LRScheduler):
schedulers = [schedulers]
for epoch in range(epochs):
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(
target[epoch],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, target[epoch], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
[scheduler.step() for scheduler in schedulers]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
_test_CosineAnnealingWarmRestarts
|
def _test_CosineAnnealingWarmRestarts(self, scheduler, targets, epochs=10):
for index, epoch in enumerate(torch.arange(0, epochs, 0.1)):
epoch = round(epoch.item(), 1)
scheduler.step(epoch)
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(
target[index],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, target[index], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
|
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
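The helper above steps CosineAnnealingWarmRestarts with fractional epoch values. A minimal sketch of that batch-level stepping pattern, assuming a toy model and arbitrary schedule constants:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

model = torch.nn.Linear(2, 2)
opt = SGD(model.parameters(), lr=0.1)
scheduler = CosineAnnealingWarmRestarts(opt, T_0=4, T_mult=1, eta_min=0.01)

for epoch in range(2):
    for batch in range(10):
        opt.step()
        # fractional epochs drive the restart schedule within an epoch,
        # as the helper above does with torch.arange(0, epochs, 0.1)
        scheduler.step(epoch + batch / 10)
        print(round(opt.param_groups[0]["lr"], 5))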
torch
|
test/test_optim.py
|
_test_interleaved_CosineAnnealingWarmRestarts
|
def _test_interleaved_CosineAnnealingWarmRestarts(self, scheduler, targets, epochs):
for index, epoch in enumerate(epochs):
scheduler.step(epoch)
for param_group, target in zip(self.opt.param_groups, targets):
self.assertEqual(
target[index],
param_group["lr"],
msg="LR is wrong in epoch {}: expected {}, got {}".format(
epoch, target[index], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
|
optim.step(closure)
self.assertEqual(data, 6)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
class TestLRScheduler(TestCase):
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
pre_hook
|
def pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data += 2
params = [torch.Tensor([1, 1])]
opt = SGD(params, lr=0.001)
data = 5
hook_handle = opt.register_step_pre_hook(pre_hook)
opt.step()
opt.step()
# check if pre hooks were registered
self.assertEqual(data, 9)
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
opt.step()
self.assertEqual(data, 9)
|
def pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
nonlocal data
data += 2
params = [torch.tensor([1, 1], device=device, dtype=dtype)]
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
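The modified pre_hook test above relies on Optimizer.register_step_pre_hook and the removable handle it returns. A minimal stand-alone sketch of the same pattern (the counter dict is illustrative only):

import torch
from torch.optim import SGD

params = [torch.nn.Parameter(torch.tensor([1.0, 1.0]))]
opt = SGD(params, lr=0.001)
calls = {"pre": 0}

def pre_hook(optimizer, args, kwargs):
    # runs before every optimizer.step()
    calls["pre"] += 1

handle = opt.register_step_pre_hook(pre_hook)
opt.step()
opt.step()
assert calls["pre"] == 2

handle.remove()          # detach the hook
opt.step()
assert calls["pre"] == 2  # no longer fires after removal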
torch
|
test/test_optim.py
|
dummy_closure
|
for param_group, lr_target, momentum_target in zip(
self.opt.param_groups, lr_targets, momentum_targets
):
self.assertEqual(
lr_target[batch_num],
param_group["lr"],
msg="LR is wrong in batch_num {}: expected {}, got {}".format(
batch_num, lr_target[batch_num], param_group["lr"]
),
atol=1e-5,
rtol=0,
)
if use_beta1 and "betas" in param_group.keys():
self.assertEqual(
momentum_target[batch_num],
param_group["betas"][0],
msg="Beta1 is wrong in batch_num {}: expected {}, got {}".format(
batch_num,
momentum_target[batch_num],
param_group["betas"][0],
),
atol=1e-5,
rtol=0,
)
elif "momentum" in param_group.keys():
self.assertEqual(
momentum_target[batch_num],
param_group["momentum"],
msg="Momentum is wrong in batch_num {}: expected {}, got {}".format(
batch_num,
momentum_target[batch_num],
param_group["momentum"],
),
atol=1e-5,
rtol=0,
)
self.opt.step()
scheduler.step()
|
def dummy_closure():
return 1
closure = dummy_closure if optim_info.step_requires_closure else None
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
optim = optim_info.optim_cls(params, **optim_input.kwargs)
data = 2
hook_handle = optim.register_step_post_hook(post_hook)
optim.step(closure)
optim.step(closure)
# check if post hooks were registered
self.assertEqual(data, 6)
# remove handles, take step and verify that hook is no longer registered
hook_handle.remove()
optim.step(closure)
self.assertEqual(data, 6)
|
import functools
import math
import tempfile
import unittest
from copy import deepcopy
from typing import Any, Dict, Tuple
from unittest.mock import patch
from optim.test_lrscheduler import TestLRScheduler # noqa: F401
from optim.test_optim import TestDifferentiableOptimizer # noqa: F401
from optim.test_swa_utils import TestSWAUtils # noqa: F401
import torch
from torch.nn import Parameter
from torch.optim import Optimizer, SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.optimizer import (
register_optimizer_step_post_hook,
register_optimizer_step_pre_hook,
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_device_type import (
instantiate_device_type_tests,
largeTensorTest,
onlyCPU,
onlyCUDA,
onlyNativeDeviceTypes,
skipMPS,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_dtype import floating_types_and
from torch.testing._internal.common_optimizers import (
_get_device_type,
_get_optim_inputs_including_global_cliquey_kwargs,
optim_db,
OptimizerErrorEnum,
optims,
TensorTracker,
)
from torch.testing._internal.common_utils import (
markDynamoStrictTest,
parametrize,
run_tests,
TEST_WITH_TORCHDYNAMO,
TestCase,
)
FP16_REDUCED_PRECISION = {"atol": 1e-5, "rtol": 1e-4}
import gc
import inspect
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
|
torch
|
test/test_out_dtype_op.py
|
forward
|
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
class M(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
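out_dtype, used in the new tests above, runs an op as if its integer inputs carried the requested accumulation dtype. A minimal eager sketch, assuming int8 inputs and arbitrary shapes:

import torch
from torch._higher_order_ops.out_dtype import out_dtype

x = torch.randint(-8, 8, (3, 4), dtype=torch.int8)
w = torch.randint(-8, 8, (4, 5), dtype=torch.int8)

# matmul with int32 accumulation/output instead of the int8 input dtype
out = out_dtype(torch.ops.aten.mm.default, torch.int32, x, w)
print(out.dtype)  # torch.int32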
torch
|
test/test_out_dtype_op.py
|
multiple_out
|
def multiple_out(x):
return out_dtype(
torch.ops.aten.topk.default, torch.int32, x, 5
)
inp = (torch.randn(10),)
with self.assertRaisesRegex(ValueError, "out_dtype's can only apply to ops that return a single tensor"):
multiple_out(*inp)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_out_dtype_op.py
|
singleton_list_out
|
def singleton_list_out(x):
return out_dtype(
torch.ops.aten.split_copy.Tensor, torch.int32, x, 10
)
with self.assertRaisesRegex(ValueError, "out_dtype's can only apply to ops that return a single tensor"):
singleton_list_out(*inp)
|
import unittest
import torch
import torch._dynamo
import torch._inductor
import torch._inductor.decomposition
from torch._higher_order_ops.out_dtype import out_dtype
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import (
run_tests, TestCase, IS_WINDOWS, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU, TEST_CUDA
)
from torch.testing._internal.common_quantization import skipIfNoDynamoSupport
from torch.testing import FileCheck
from torch.testing._internal.common_cuda import SM80OrLater, _get_torch_cuda_version
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/test_overrides.py
|
__repr__
|
def __repr__(self):
return "DiagonalTensor(N={}, value={})".format(self._N, self._i)
|
def __repr__(self):
return f"DiagonalTensor(N={self._N}, value={self._i})"
|
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
import collections
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_overridable_functions,
get_testing_overrides,
is_tensor_method_or_property,
TorchFunctionMode,
_get_current_function_mode,
_get_current_function_mode_stack,
)
from torch.utils._mode_utils import all_same_mode
from torch.utils._pytree import tree_map
Tensor = torch.Tensor
HANDLED_FUNCTIONS_DIAGONAL = {}
class DiagonalTensor:
from torch.testing._internal.generated.annotated_fn_args import annotated_args
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
from torch.distributions.utils import broadcast_all
|
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
import collections
import unittest
import contextlib
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_ignored_functions,
get_overridable_functions,
get_testing_overrides,
resolve_name,
is_tensor_method_or_property,
TorchFunctionMode,
_get_current_function_mode,
_get_current_function_mode_stack,
BaseTorchFunctionMode
)
from torch.utils._mode_utils import all_same_mode
from torch.utils._pytree import tree_map
Tensor = torch.Tensor
HANDLED_FUNCTIONS_DIAGONAL = {}
class DiagonalTensor:
from torch.testing._internal.generated.annotated_fn_args import annotated_args
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
from torch.distributions.utils import broadcast_all
from torch._C import _is_torch_function_all_disabled
from torch._C import _len_torch_function_stack
from torch.utils._device import DeviceContext
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
torch
|
test/test_overrides.py
|
__eq__
|
def __eq__(self, other):
if type(other) is type(self):
if self._N == other._N and self._i == other._i:
return True
else:
return False
else:
return False
|
def __eq__(self, other):
return type(other) is type(self) and self._N == other._N and self._i == other._i
|
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
import collections
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_overridable_functions,
get_testing_overrides,
is_tensor_method_or_property,
TorchFunctionMode,
_get_current_function_mode,
_get_current_function_mode_stack,
)
from torch.utils._mode_utils import all_same_mode
from torch.utils._pytree import tree_map
Tensor = torch.Tensor
HANDLED_FUNCTIONS_DIAGONAL = {}
class DiagonalTensor:
from torch.testing._internal.generated.annotated_fn_args import annotated_args
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
from torch.distributions.utils import broadcast_all
|
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
import collections
import unittest
import contextlib
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_ignored_functions,
get_overridable_functions,
get_testing_overrides,
resolve_name,
is_tensor_method_or_property,
TorchFunctionMode,
_get_current_function_mode,
_get_current_function_mode_stack,
BaseTorchFunctionMode
)
from torch.utils._mode_utils import all_same_mode
from torch.utils._pytree import tree_map
Tensor = torch.Tensor
HANDLED_FUNCTIONS_DIAGONAL = {}
class DiagonalTensor:
from torch.testing._internal.generated.annotated_fn_args import annotated_args
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
from torch.distributions.utils import broadcast_all
from torch._C import _is_torch_function_all_disabled
from torch._C import _len_torch_function_stack
from torch.utils._device import DeviceContext
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|
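The DiagonalTensor records above come from the __torch_function__ override tests. A minimal sketch of the protocol with a made-up wrapper type (only torch.add is handled; everything else is deferred via NotImplemented):

import torch

class WrappedTensor:
    # toy tensor-like wrapper, assumed for illustration
    def __init__(self, value):
        self.value = value

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        if func is torch.add:
            unwrapped = [a.value if isinstance(a, cls) else a for a in args]
            return cls(torch.add(*unwrapped, **kwargs))
        return NotImplemented

x = WrappedTensor(torch.ones(3))
out = torch.add(x, torch.ones(3))  # dispatched to WrappedTensor.__torch_function__
print(out.value)                   # tensor([2., 2., 2.])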
torch
|
test/test_optim.py
|
test_averaged_model_mixed_device
|
def test_averaged_model_mixed_device(self):
if not torch.cuda.is_available():
return
dnn = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10)
)
dnn[0].cuda()
dnn[1].cpu()
averaged_dnn = AveragedModel(dnn)
averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
n_updates = 10
for i in range(n_updates):
for p, p_avg in zip(dnn.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
p_avg += p.detach() / n_updates
averaged_dnn.update_parameters(dnn)
for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
self.assertEqual(p_avg, p_swa)
# Check that AveragedModel is on the correct device
self.assertTrue(p_avg.device == p_swa.device)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
class TestSWAUtils(TestCase):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_averaged_model_state_dict
|
def test_averaged_model_state_dict(self):
dnn = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3), torch.nn.Linear(5, 10)
)
averaged_dnn = AveragedModel(dnn)
averaged_dnn2 = AveragedModel(dnn)
n_updates = 10
for i in range(n_updates):
for p in dnn.parameters():
p.detach().add_(torch.randn_like(p))
averaged_dnn.update_parameters(dnn)
averaged_dnn2.load_state_dict(averaged_dnn.state_dict())
for p_swa, p_swa2 in zip(averaged_dnn.parameters(), averaged_dnn2.parameters()):
self.assertEqual(p_swa, p_swa2)
self.assertTrue(averaged_dnn.n_averaged == averaged_dnn2.n_averaged)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
class TestSWAUtils(TestCase):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_averaged_model_exponential
|
def test_averaged_model_exponential(self):
# Test AveragedModel with EMA as avg_fn
dnn = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3),
torch.nn.BatchNorm2d(5, momentum=0.3),
torch.nn.Linear(5, 10),
)
alpha = 0.9
def avg_fn(p_avg, p, n_avg):
return alpha * p_avg + (1 - alpha) * p
averaged_dnn = AveragedModel(dnn, avg_fn=avg_fn)
averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
n_updates = 10
for i in range(n_updates):
updated_averaged_params = []
for p, p_avg in zip(dnn.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
if i == 0:
updated_averaged_params.append(p.clone())
else:
updated_averaged_params.append(
(p_avg * alpha + p * (1 - alpha)).clone()
)
for b in dnn.buffers():
if b.size() != torch.Size([]):
b.detach_().add_(torch.randn_like(b))
averaged_dnn.update_parameters(dnn)
averaged_params = updated_averaged_params
for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
self.assertEqual(p_avg, p_swa)
for b_avg, b_swa in zip(dnn.buffers(), averaged_dnn.module.buffers()):
self.assertEqual(b_avg, b_swa)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
class TestSWAUtils(TestCase):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
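The deleted test above checks AveragedModel with an exponential moving average as avg_fn. A minimal sketch of that usage; the decay alpha, the toy model, and the random parameter updates are assumptions standing in for a real training loop:

import torch
from torch.optim.swa_utils import AveragedModel

model = torch.nn.Linear(4, 2)
alpha = 0.9

def ema_avg(p_avg, p, num_averaged):
    # exponential moving average instead of the default running mean
    return alpha * p_avg + (1 - alpha) * p

ema_model = AveragedModel(model, avg_fn=ema_avg)
for _ in range(10):
    # a real training step would update `model` here
    with torch.no_grad():
        for p in model.parameters():
            p.add_(torch.randn_like(p))
    ema_model.update_parameters(model)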
torch
|
test/test_optim.py
|
avg_fn
|
def avg_fn(p_avg, p, n_avg):
return alpha * p_avg + (1 - alpha) * p
averaged_dnn = AveragedModel(dnn, avg_fn=avg_fn)
averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
n_updates = 10
for i in range(n_updates):
updated_averaged_params = []
for p, p_avg in zip(dnn.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
if i == 0:
updated_averaged_params.append(p.clone())
else:
updated_averaged_params.append(
(p_avg * alpha + p * (1 - alpha)).clone()
)
for b in dnn.buffers():
if b.size() != torch.Size([]):
b.detach_().add_(torch.randn_like(b))
averaged_dnn.update_parameters(dnn)
averaged_params = updated_averaged_params
for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
self.assertEqual(p_avg, p_swa)
for b_avg, b_swa in zip(dnn.buffers(), averaged_dnn.module.buffers()):
self.assertEqual(b_avg, b_swa)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_averaged_model_exponential_buffers
|
def test_averaged_model_exponential_buffers(self):
# Test AveragedModel with EMA as avg_fn and use_buffers as True.
dnn = torch.nn.Sequential(
torch.nn.Conv2d(1, 5, kernel_size=3),
torch.nn.BatchNorm2d(5, momentum=0.3),
torch.nn.Linear(5, 10),
)
alpha = 0.9
def avg_fn(p_avg, p, n_avg):
return alpha * p_avg + (1 - alpha) * p
averaged_dnn = AveragedModel(dnn, avg_fn=avg_fn, use_buffers=True)
dnn_params = itertools.chain(dnn.parameters(), dnn.buffers())
averaged_params = [
torch.zeros_like(param)
for param in dnn_params
if param.size() != torch.Size([])
]
n_updates = 10
for i in range(n_updates):
updated_averaged_params = []
for p, p_avg in zip(dnn_params, averaged_params):
if p.size() == torch.Size([]):
continue
p.detach().add_(torch.randn_like(p))
if i == 0:
updated_averaged_params.append(p.clone())
else:
updated_averaged_params.append(
(p_avg * alpha + p * (1 - alpha)).clone()
)
averaged_dnn.update_parameters(dnn)
averaged_params = updated_averaged_params
for p_avg, p_swa in zip(
averaged_params,
itertools.chain(
averaged_dnn.module.parameters(), averaged_dnn.module.buffers()
),
):
self.assertEqual(p_avg, p_swa)
|
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
device, dtype, optim_info
)
for optim_input in all_optim_inputs:
optim = optim_info.optim_cls(params, **optim_input.kwargs)
optim2 = SGD(params)
data = []
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
class TestSWAUtils(TestCase):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
torch
|
test/test_optim.py
|
avg_fn
|
def avg_fn(p_avg, p, n_avg):
return alpha * p_avg + (1 - alpha) * p
averaged_dnn = AveragedModel(dnn, avg_fn=avg_fn)
averaged_params = [torch.zeros_like(param) for param in dnn.parameters()]
n_updates = 10
for i in range(n_updates):
updated_averaged_params = []
for p, p_avg in zip(dnn.parameters(), averaged_params):
p.detach().add_(torch.randn_like(p))
if i == 0:
updated_averaged_params.append(p.clone())
else:
updated_averaged_params.append(
(p_avg * alpha + p * (1 - alpha)).clone()
)
for b in dnn.buffers():
if b.size() != torch.Size([]):
b.detach_().add_(torch.randn_like(b))
averaged_dnn.update_parameters(dnn)
averaged_params = updated_averaged_params
for p_avg, p_swa in zip(averaged_params, averaged_dnn.parameters()):
self.assertEqual(p_avg, p_swa)
for b_avg, b_swa in zip(dnn.buffers(), averaged_dnn.module.buffers()):
self.assertEqual(b_avg, b_swa)
|
# register global hooks to both optimizers
global_pre_handle = register_optimizer_step_pre_hook(global_pre_hook)
global_post_handle = register_optimizer_step_post_hook(global_post_hook)
# register local hooks
first_pre_handle = optim.register_step_pre_hook(local_pre_hook)
first_post_handle = optim.register_step_post_hook(local_post_hook)
second_pre_handle = optim2.register_step_pre_hook(local_pre_hook)
second_post_handle = optim2.register_step_post_hook(local_post_hook)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
|
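The replacement context above registers global hooks alongside per-optimizer ones. A minimal sketch of the module-level registration functions (the events list and lambda hook bodies are illustrative only):

import torch
from torch.optim import SGD
from torch.optim.optimizer import (
    register_optimizer_step_post_hook,
    register_optimizer_step_pre_hook,
)

events = []
global_pre = register_optimizer_step_pre_hook(
    lambda opt, args, kwargs: events.append("pre"))
global_post = register_optimizer_step_post_hook(
    lambda opt, args, kwargs: events.append("post"))

opt = SGD([torch.nn.Parameter(torch.ones(2))], lr=0.1)
opt.step()
print(events)  # ['pre', 'post'] - global hooks fire for every optimizer

global_pre.remove()
global_post.remove()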
torch
|
test/test_optim.py
|
_test_update_bn
|
def _test_update_bn(self, dnn, dl_x, dl_xy, cuda):
preactivation_sum = torch.zeros(dnn.n_features)
preactivation_squared_sum = torch.zeros(dnn.n_features)
if cuda:
preactivation_sum = preactivation_sum.cuda()
preactivation_squared_sum = preactivation_squared_sum.cuda()
total_num = 0
for x in dl_x:
x = x[0]
if cuda:
x = x.cuda()
dnn.forward(x)
preactivations = dnn.compute_preactivation(x)
if len(preactivations.shape) == 4:
preactivations = preactivations.transpose(1, 3)
preactivations = preactivations.contiguous().view(-1, dnn.n_features)
total_num += preactivations.shape[0]
preactivation_sum += torch.sum(preactivations, dim=0)
preactivation_squared_sum += torch.sum(preactivations**2, dim=0)
preactivation_mean = preactivation_sum / total_num
preactivation_var = preactivation_squared_sum / total_num
preactivation_var = preactivation_var - preactivation_mean**2
update_bn(dl_xy, dnn, device=x.device)
self.assertEqual(preactivation_mean, dnn.bn.running_mean)
self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)
def _reset_bn(module):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
# reset batch norm and run update_bn again
dnn.apply(_reset_bn)
update_bn(dl_xy, dnn, device=x.device)
self.assertEqual(preactivation_mean, dnn.bn.running_mean)
self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)
# using the dl_x loader instead of dl_xy
dnn.apply(_reset_bn)
update_bn(dl_x, dnn, device=x.device)
self.assertEqual(preactivation_mean, dnn.bn.running_mean)
self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
class TestSWAUtils(TestCase):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
_reset_bn
|
def _reset_bn(module):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
# reset batch norm and run update_bn again
dnn.apply(_reset_bn)
update_bn(dl_xy, dnn, device=x.device)
self.assertEqual(preactivation_mean, dnn.bn.running_mean)
self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)
# using the dl_x loader instead of dl_xy
dnn.apply(_reset_bn)
update_bn(dl_x, dnn, device=x.device)
self.assertEqual(preactivation_mean, dnn.bn.running_mean)
self.assertEqual(preactivation_var, dnn.bn.running_var, atol=1e-1, rtol=0)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_optim.py
|
test_update_bn_dnn
|
def test_update_bn_dnn(self):
# Test update_bn for a fully-connected network with BatchNorm1d
objects, input_features = 100, 5
x = torch.rand(objects, input_features)
y = torch.rand(objects)
ds_x = torch.utils.data.TensorDataset(x)
ds_xy = torch.utils.data.TensorDataset(x, y)
dl_x = torch.utils.data.DataLoader(ds_x, batch_size=5, shuffle=True)
dl_xy = torch.utils.data.DataLoader(ds_xy, batch_size=5, shuffle=True)
dnn = SWATestDNN(input_features=input_features)
dnn.train()
self._test_update_bn(dnn, dl_x, dl_xy, False)
if torch.cuda.is_available():
dnn = SWATestDNN(input_features=input_features)
dnn.train()
self._test_update_bn(dnn.cuda(), dl_x, dl_xy, True)
self.assertTrue(dnn.training)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
class TestSWAUtils(TestCase):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
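update_bn, exercised by the deleted tests above, recomputes BatchNorm running statistics with one pass over a data loader. A minimal sketch, assuming a toy network and random data in place of the SWATestDNN helper:

import torch
from torch.optim.swa_utils import AveragedModel, update_bn

model = torch.nn.Sequential(torch.nn.Linear(5, 5), torch.nn.BatchNorm1d(5))
swa_model = AveragedModel(model)

loader = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(torch.randn(100, 5)), batch_size=10
)

# resets running_mean / running_var and refills them from the loader
update_bn(loader, swa_model)
print(swa_model.module[1].running_mean)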
torch
|
test/test_optim.py
|
test_update_bn_cnn
|
def test_update_bn_cnn(self):
# Test update_bn for convolutional network and BatchNorm2d
objects = 100
input_channels = 3
height, width = 5, 5
x = torch.rand(objects, input_channels, height, width)
y = torch.rand(objects)
ds_x = torch.utils.data.TensorDataset(x)
ds_xy = torch.utils.data.TensorDataset(x, y)
dl_x = torch.utils.data.DataLoader(ds_x, batch_size=5, shuffle=True)
dl_xy = torch.utils.data.DataLoader(ds_xy, batch_size=5, shuffle=True)
dnn = SWATestCNN(input_channels=input_channels)
dnn.train()
self._test_update_bn(dnn, dl_x, dl_xy, False)
if torch.cuda.is_available():
dnn = SWATestCNN(input_channels=input_channels)
dnn.train()
self._test_update_bn(dnn.cuda(), dl_x, dl_xy, True)
self.assertTrue(dnn.training)
|
import warnings
import math
import unittest
import functools
import itertools
import pickle
from copy import deepcopy
import weakref
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter
from torch.optim import Adam, SGD, Optimizer
from torch import sparse
from torch.optim.lr_scheduler import (
LambdaLR,
MultiplicativeLR,
SequentialLR,
StepLR,
MultiStepLR,
ConstantLR,
LinearLR,
ExponentialLR,
CosineAnnealingLR,
ReduceLROnPlateau,
LRScheduler,
CyclicLR,
CosineAnnealingWarmRestarts,
OneCycleLR,
ChainedScheduler,
PolynomialLR,
EPOCH_DEPRECATION_WARNING,
)
from torch.optim.swa_utils import AveragedModel, SWALR, update_bn
from torch.testing._internal.common_utils import (
TestCase,
run_tests,
TEST_WITH_UBSAN,
load_tests,
parametrize,
instantiate_parametrized_tests,
gradcheck,
skipIfRocm,
skipIfTorchDynamo
)
from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA
from typing import Dict, Any, Tuple
from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook
from unittest.mock import patch
load_tests = load_tests
from torch.optim import adam, adamw
import gc
import gc
import weakref
import types
import types
from torch.nn import Parameter
import gc
import weakref
class TestSWAUtils(TestCase):
from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop,
asgd, adamax, adadelta, adagrad)
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/test_overrides.py
|
__repr__
|
def __repr__(self):
return "DiagonalTensor(N={}, value={})".format(self._N, self._i)
|
def __repr__(self):
return f"DiagonalTensor(N={self._N}, value={self._i})"
|
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
import collections
import unittest
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_overridable_functions,
get_testing_overrides,
is_tensor_method_or_property,
TorchFunctionMode,
_get_current_function_mode,
_get_current_function_mode_stack,
)
from torch.utils._mode_utils import all_same_mode
from torch.utils._pytree import tree_map
Tensor = torch.Tensor
HANDLED_FUNCTIONS_DIAGONAL = {}
class DiagonalTensor:
from torch.testing._internal.generated.annotated_fn_args import annotated_args
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
from torch.distributions.utils import broadcast_all
|
import torch
import numpy as np
import inspect
import functools
import pprint
import pickle
import collections
import unittest
import contextlib
from torch.testing._internal.common_utils import TestCase, run_tests, TEST_WITH_CROSSREF, TEST_WITH_TORCHDYNAMO
from torch.overrides import (
handle_torch_function,
has_torch_function,
get_ignored_functions,
get_overridable_functions,
get_testing_overrides,
resolve_name,
is_tensor_method_or_property,
TorchFunctionMode,
_get_current_function_mode,
_get_current_function_mode_stack,
BaseTorchFunctionMode
)
from torch.utils._mode_utils import all_same_mode
from torch.utils._pytree import tree_map
Tensor = torch.Tensor
HANDLED_FUNCTIONS_DIAGONAL = {}
class DiagonalTensor:
from torch.testing._internal.generated.annotated_fn_args import annotated_args
from torch.testing._internal.common_utils import gradcheck, gradgradcheck
from torch.distributions.utils import broadcast_all
from torch._C import _is_torch_function_all_disabled
from torch._C import _len_torch_function_stack
from torch.utils._device import DeviceContext
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
modified
|