Dataset schema (column, dtype, distinct values or length range):

    library          stringclasses   1 value
    test_file        stringclasses   785 values
    test_function    stringlengths   1 to 295
    before           stringlengths   0 to 448k
    after            stringlengths   0 to 487k
    context_before   stringclasses   947 values
    context_after    stringlengths   0 to 16.3k
    commit_before    stringclasses   1 value
    commit_after     stringclasses   1 value
    change_type      stringclasses   3 values
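The column summary above uses the Hugging Face dataset-viewer notation (stringclasses / stringlengths). As a minimal sketch, and only under the assumption that these rows are published as a Hugging Face dataset (the dataset path in the snippet is a hypothetical placeholder, not taken from this dump), the records below could be loaded and filtered like this:

    # Minimal sketch, not part of the original dump: assumes the rows are available
    # as a Hugging Face dataset. "user/pytorch-test-changes" is a hypothetical
    # placeholder; substitute the real dataset identifier.
    from datasets import load_dataset

    ds = load_dataset("user/pytorch-test-changes", split="train")

    # Keep only rows describing functions deleted from test/test_optim.py,
    # which is what the records below contain.
    deleted = ds.filter(
        lambda row: row["change_type"] == "deleted"
        and row["test_file"] == "test/test_optim.py"
    )
    for row in deleted.select(range(3)):
        print(row["test_function"], row["commit_before"][:8], "->", row["commit_after"][:8])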

library: torch
test_file: test/test_optim.py
test_function: __init__
before:
    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(1, 1, 1)
        self.conv2 = torch.nn.Conv2d(1, 1, 1)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class SchedulerTestNet(torch.nn.Module): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: forward
before:
    def forward(self, x):
        return self.conv2(F.relu(self.conv1(x)))
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class SchedulerTestNet(torch.nn.Module): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: __init__
before:
    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(1, 1, 1)
        self.conv2 = torch.nn.Conv2d(1, 1, 1)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class SchedulerTestNet(torch.nn.Module): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: __call__
before:
    def __call__(self, epoch):
        return self.value * epoch
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class LambdaLRTestObject: import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_linear_linearlr_is_constant_for_constant_epoch
before:
    def test_linear_linearlr_is_constant_for_constant_epoch(self):
        scheduler = LinearLR(self.opt)
        self._test_lr_is_constant_for_constant_epoch(scheduler)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_polynomial_lr_is_constant_for_constant_epoch
before:
    def test_polynomial_lr_is_constant_for_constant_epoch(self):
        scheduler = PolynomialLR(self.opt, power=0.9)
        self._test_lr_is_constant_for_constant_epoch(scheduler)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_step_lr
before:
    def test_step_lr(self):
        # lr = 0.05 if epoch < 3
        # lr = 0.005 if 30 <= epoch < 6
        # lr = 0.0005 if epoch >= 9
        epochs = 10
        single_targets = [0.05] * 3 + [0.005] * 3 + [0.0005] * 3 + [0.00005] * 3
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
        self._test(scheduler, targets, epochs)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_get_last_lr_step_lr
before:
    def test_get_last_lr_step_lr(self):
        from torch.nn import Parameter
        epochs = 10
        optimizer = torch.optim.SGD(
            [Parameter(torch.randn(2, 2, requires_grad=True))], 0.1
        )
        targets = [[0.1] * 3 + [0.01] * 3 + [0.001] * 3 + [0.0001]]
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 3, gamma=0.1)
        self._test_get_last_lr(scheduler, targets, epochs)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_get_last_lr_multi_step_lr
before:
    def test_get_last_lr_multi_step_lr(self):
        # lr = 0.05 if epoch < 2
        # lr = 0.005 if 2 <= epoch < 5
        # lr = 0.0005 if 5 <= epoch < 9
        # lr = 0.00005 if 9 <= epoch
        epochs = 10
        single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 1
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
        self._test_get_last_lr(scheduler, targets, epochs)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_multi_step_lr
before:
    def test_multi_step_lr(self):
        # lr = 0.05 if epoch < 2
        # lr = 0.005 if 2 <= epoch < 5
        # lr = 0.0005 if epoch < 9
        # lr = 0.00005 if epoch >= 9
        epochs = 10
        single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 3
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
        self._test(scheduler, targets, epochs)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_multi_step_lr_with_epoch
before:
    def test_multi_step_lr_with_epoch(self):
        # lr = 0.05 if epoch < 2
        # lr = 0.005 if 2 <= epoch < 5
        # lr = 0.0005 if epoch < 9
        # lr = 0.00005 if epoch >= 9
        epochs = 10
        single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 3
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
        self._test_with_epoch(scheduler, targets, epochs)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_get_last_lr_constantlr
before:
    def test_get_last_lr_constantlr(self):
        # lr = 0.025 if epoch < 5
        # lr = 0.005 if 5 <= epoch
        epochs = 10
        single_targets = [0.025] * 5 + [0.05] * 5
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5)
        self._test_get_last_lr(scheduler, targets, epochs)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_get_last_lr_linearlr
before:
    def test_get_last_lr_linearlr(self):
        # lr = 0.025 if epoch == 0
        # lr = 0.03125 if epoch == 1
        # lr = 0.0375 if epoch == 2
        # lr = 0.04375 if epoch == 3
        # lr = 0.005 if 4 <= epoch
        epochs = 10
        start_factor = 1.0 / 4
        end_factor = 3.0 / 5
        iters = 4
        interpolation = [
            start_factor + i * (end_factor - start_factor) / iters for i in range(iters)
        ]
        single_targets = [x * 0.05 for x in interpolation] + [0.05 * end_factor] * (
            epochs - iters
        )
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = LinearLR(
            self.opt,
            start_factor=start_factor,
            end_factor=end_factor,
            total_iters=iters,
        )
        self._test_get_last_lr(scheduler, targets, epochs)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_constantlr
before:
    def test_constantlr(self):
        # lr = 0.025 if epoch < 5
        # lr = 0.005 if 5 <= epoch
        epochs = 10
        single_targets = [0.025] * 5 + [0.05] * 5
        targets = [single_targets, [x * epochs for x in single_targets]]
        scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5)
        self._test(scheduler, targets, epochs)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_new_pattern_no_warning
before:
    def test_new_pattern_no_warning(self):
        epochs = 35
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
            self.assertTrue(len(ws) == 0, "No warning should be raised")
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            for _ in range(epochs):
                self.opt.step()
                scheduler.step()
            self.assertTrue(len(ws) == 0, "No warning should be raised")
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_new_pattern_no_warning_with_arg
before:
    def test_new_pattern_no_warning_with_arg(self):
        epochs = 35
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
            self.assertTrue(len(ws) == 0, "No warning should be raised")
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            for _ in range(epochs):
                self.opt.step()
                scheduler.step()
            self.assertTrue(len(ws) == 0, "No warning should be raised")
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_new_pattern_no_warning_with_overridden_optim_step
before:
    def test_new_pattern_no_warning_with_overridden_optim_step(self):
        epochs = 35
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
            self.assertTrue(len(ws) == 0, "No warning should be raised")
        # emulate use-case with optimizer.step overridden
        import types
        old_step = self.opt.step

        def new_step(o, *args, **kwargs):
            retval = old_step(*args, **kwargs)
            return retval

        self.opt.step = types.MethodType(new_step, self.opt)

        def new_pattern():
            for e in range(epochs):
                self.opt.step()
                scheduler.step()

        self.assertWarnsRegex(
            UserWarning, r"`optimizer.step\(\)` has been overridden", new_pattern
        )
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: new_step
before:
    def new_step(o, *args, **kwargs):
        retval = old_step(*args, **kwargs)
        return retval

    self.opt.step = types.MethodType(new_step, self.opt)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: _test_lr_is_constant_for_constant_epoch
before:
    def _test_lr_is_constant_for_constant_epoch(self, scheduler):
        l = []
        for _ in range(10):
            scheduler.optimizer.step()
            with warnings.catch_warnings(record=True) as w:
                scheduler.step(2)
                self._check_warning_is_epoch_deprecation_warning(w)
            l.append(self.opt.param_groups[0]["lr"])
        self.assertEqual(min(l), max(l))
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_step_lr_is_constant_for_constant_epoch
before:
    def test_step_lr_is_constant_for_constant_epoch(self):
        scheduler = StepLR(self.opt, 2)
        self._test_lr_is_constant_for_constant_epoch(scheduler)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_exponential_lr_is_constant_for_constant_epoch
before:
    def test_exponential_lr_is_constant_for_constant_epoch(self):
        scheduler = ExponentialLR(self.opt, gamma=0.9)
        self._test_lr_is_constant_for_constant_epoch(scheduler)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_constantlr_is_constant_for_constant_epoch
before:
    def test_constantlr_is_constant_for_constant_epoch(self):
        scheduler = ConstantLR(self.opt)
        self._test_lr_is_constant_for_constant_epoch(scheduler)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: setUp
before:
    def setUp(self):
        super().setUp()
        self.net = SchedulerTestNet()
        self.opt = SGD(
            [
                {"params": self.net.conv1.parameters()},
                {"params": self.net.conv2.parameters(), "lr": 0.5},
            ],
            lr=0.05,
        )
after:
    # Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
    all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
        device, dtype, optim_info, skip=("differentiable",)
    )
    weight = Parameter(
        torch.randn(2, 3, requires_grad=True, device=device, dtype=dtype)
    )
    bias = Parameter(torch.randn(2, requires_grad=True, device=device, dtype=dtype))
    input = torch.randn(3, requires_grad=True, device=device, dtype=dtype)
    params = [weight, bias]
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: _check_warning_is_epoch_deprecation_warning
before:
    def _check_warning_is_epoch_deprecation_warning(self, w, *, num_warnings: int = 1):
        """This function swallows the epoch deprecation warning which is produced when we
        call `scheduler.step(epoch)` with some not `None` value of `epoch`. this is
        deprecated, and this function will need to be removed/updated when
        the schedulers no longer accept the parameter at all.
        """
        self.assertEqual(len(w), num_warnings)
        for warning in w:
            self.assertEqual(len(warning.message.args), 1)
            self.assertEqual(warning.message.args[0], EPOCH_DEPRECATION_WARNING)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: __init__
before:
    def __init__(self):
        super().__init__()
        self.conv1 = torch.nn.Conv2d(1, 1, 1)
        self.conv2 = torch.nn.Conv2d(1, 1, 1)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class SchedulerTestNet(torch.nn.Module): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: get_lr
before:
    def get_lr(self, step):
        global_step = self.last_epoch
        gamma_power = (
            [0] + [i + 1 for i, m in enumerate(self.milestones) if global_step >= m]
        )[-1]
        return [
            init_lr * (self.gamma**gamma_power) for init_lr in self.init_lr
        ]
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class MultiStepLR(torch.optim.lr_scheduler.LRScheduler): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_no_cyclic_references_in_step
before:
    def test_no_cyclic_references_in_step(self):
        import gc
        import weakref

        def run():
            param = torch.empty(10, requires_grad=True)
            optim = SGD(params=[param], lr=0.5)
            scheduler = LambdaLR(optim, lambda epoch: 1.0)
            param.sum().backward()
            optim.step()
            scheduler.step()
            return weakref.ref(scheduler)

        # To ensure that there are no reference cycles in scheduler,
        # we need to turn off the garbage collector. Since gc will
        # automatically collect unreachable objects.
        gc.disable()
        ref = run()
        assert ref() is None
        gc.enable()  # restore
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: run
before:
    def run():
        param = torch.empty(10, requires_grad=True)
        optim = SGD(params=[param], lr=0.5)
        scheduler = LambdaLR(optim, lambda epoch: 1.0)
        param.sum().backward()
        optim.step()
        scheduler.step()
        return weakref.ref(scheduler)

    # To ensure that there are no reference cycles in scheduler,
    # we need to turn off the garbage collector. Since gc will
    # automatically collect unreachable objects.
    gc.disable()
    ref = run()
    assert ref() is None
    gc.enable()  # restore
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_old_pattern_warning
before:
    def test_old_pattern_warning(self):
        epochs = 35
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
            self.assertTrue(len(ws) == 0, "No warning should be raised")

        def old_pattern():
            for _ in range(epochs):
                scheduler.step()
                self.opt.step()

        self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/test_optim.py
test_function: test_closed_form_constantlr
before:
    def test_closed_form_constantlr(self):
        scheduler = ConstantLR(self.opt, factor=1.0 / 3, total_iters=4)
        closed_form_scheduler = ConstantLR(self.opt, factor=1.0 / 3, total_iters=4)
        self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
context_before:
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_closed_form_multi_step_lr
def test_closed_form_multi_step_lr(self):
    scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
    closed_form_scheduler = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
    self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_closed_form_exp_lr
def test_closed_form_exp_lr(self):
    scheduler = ExponentialLR(self.opt, gamma=0.9)
    closed_form_scheduler = ExponentialLR(self.opt, gamma=0.9)
    self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_closed_form_poly_lr
def test_closed_form_poly_lr(self):
    scheduler = PolynomialLR(self.opt, power=0.9)
    closed_form_scheduler = PolynomialLR(self.opt, power=0.9)
    self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_closed_form_cos_anneal_lr
def test_closed_form_cos_anneal_lr(self):
    eta_min = 1e-10
    epochs = 20
    T_max = 5
    scheduler = CosineAnnealingLR(self.opt, T_max=T_max, eta_min=eta_min)
    closed_form_scheduler = CosineAnnealingLR(
        self.opt, T_max=T_max, eta_min=eta_min
    )
    self._test_against_closed_form(scheduler, closed_form_scheduler, epochs)
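For reference, a small sketch (assuming the 0.05 base learning rate used throughout these tests) of the closed-form cosine-annealing values the helper compares against; it mirrors the formula that also appears in test_cos_anneal_lr further down.

import math

base_lr, eta_min, T_max = 0.05, 1e-10, 5
closed_form = [
    eta_min + (base_lr - eta_min) * (1 + math.cos(math.pi * t / T_max)) / 2
    for t in range(20)
]
# closed_form[0] == base_lr, closed_form[T_max] == eta_min, and the curve rises again afterwards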
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cos_anneal_lr_continue
def test_cos_anneal_lr_continue(self):
    eta_min = 0.1
    T_max = 5
    scheduler = CosineAnnealingLR(self.opt, T_max=T_max, eta_min=eta_min)
    self.opt.step()
    scheduler.step()
    original_lrs = scheduler._last_lr
    new_scheduler = CosineAnnealingLR(
        self.opt, T_max=T_max, eta_min=eta_min, last_epoch=0
    )
    new_lrs = new_scheduler._last_lr
    torch.testing.assert_close(original_lrs, new_lrs, rtol=1e-4, atol=1e-5)
# Skip differentiable testing for now, see https://github.com/pytorch/pytorch/issues/116490
all_optim_inputs = _get_optim_inputs_including_global_cliquey_kwargs(
    device, dtype, optim_info, skip=("differentiable",)
)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_reduce_lr_on_plateau1
def test_reduce_lr_on_plateau1(self):
    epochs = 10
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    targets = [[0.5] * 20]
    metrics = [10 - i * 0.0167 for i in range(20)]
    scheduler = ReduceLROnPlateau(
        self.opt,
        threshold_mode="abs",
        mode="min",
        threshold=0.01,
        patience=5,
        cooldown=5,
    )
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_reduce_lr_on_plateau2
def test_reduce_lr_on_plateau2(self):
    epochs = 22
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    targets = [[0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2]
    metrics = [10 - i * 0.0165 for i in range(22)]
    scheduler = ReduceLROnPlateau(
        self.opt,
        patience=5,
        cooldown=0,
        threshold_mode="abs",
        mode="min",
        threshold=0.1,
    )
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_reduce_lr_on_plateau3
def test_reduce_lr_on_plateau3(self):
    epochs = 22
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    targets = [[0.5] * (2 + 6) + [0.05] * (5 + 6) + [0.005] * 4]
    metrics = [-0.8] * 2 + [-0.234] * 20
    scheduler = ReduceLROnPlateau(
        self.opt, mode="max", patience=5, cooldown=5, threshold_mode="abs"
    )
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_reduce_lr_on_plateau4
def test_reduce_lr_on_plateau4(self):
    epochs = 20
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    targets = [[0.5] * 20]
    metrics = [1.5 * (1.025**i) for i in range(20)]  # 1.025 > 1.1**0.25
    scheduler = ReduceLROnPlateau(
        self.opt, mode="max", patience=3, threshold_mode="rel", threshold=0.1
    )
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
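A hedged usage sketch (not taken from the test helper) of how ReduceLROnPlateau is normally driven: unlike the other schedulers, step() receives the monitored metric. The metric sequence mirrors the one in the test above; the single throwaway parameter is a placeholder.

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau

opt = SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.5)
scheduler = ReduceLROnPlateau(
    opt, mode="max", patience=3, threshold_mode="rel", threshold=0.1
)
for epoch in range(20):
    metric = 1.5 * (1.025**epoch)  # improves by >10% within every patience window
    opt.step()
    scheduler.step(metric)         # lr is expected to stay at 0.5 for all 20 epochs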
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
old_pattern
def old_pattern():
    for _ in range(epochs):
        scheduler.step()
        self.opt.step()

self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_old_pattern_warning_with_arg
def test_old_pattern_warning_with_arg(self):
    epochs = 35
    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")  # allow any warning to be raised
        scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
        self.assertTrue(len(ws) == 0, "No warning should be raised")

    def old_pattern2():
        for _ in range(epochs):
            scheduler.step()
            self.opt.step()

    self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
old_pattern2
def old_pattern2():
    for _ in range(epochs):
        scheduler.step()
        self.opt.step()

self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_old_pattern_warning_resuming
def test_old_pattern_warning_resuming(self):
    epochs = 35
    for i, group in enumerate(self.opt.param_groups):
        group["initial_lr"] = 0.01

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")  # allow any warning to be raised
        scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10)
        self.assertTrue(len(ws) == 0, "No warning should be raised")

    def old_pattern():
        for _ in range(epochs):
            scheduler.step()
            self.opt.step()

    self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
old_pattern
def old_pattern():
    for _ in range(epochs):
        scheduler.step()
        self.opt.step()

self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_old_pattern_warning_resuming_with_arg
def test_old_pattern_warning_resuming_with_arg(self):
    epochs = 35
    for i, group in enumerate(self.opt.param_groups):
        group["initial_lr"] = 0.01

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")  # allow any warning to be raised
        scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10)
        self.assertTrue(len(ws) == 0, "No warning should be raised")

    def old_pattern2():
        for _ in range(epochs):
            scheduler.step()
            self.opt.step()

    self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
old_pattern2
def old_pattern2():
    for _ in range(epochs):
        scheduler.step()
        self.opt.step()

self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_old_pattern_warning_with_overridden_optim_step
def test_old_pattern_warning_with_overridden_optim_step(self):
    epochs = 35
    for i, group in enumerate(self.opt.param_groups):
        group["initial_lr"] = 0.01

    with warnings.catch_warnings(record=True) as ws:
        warnings.simplefilter("always")  # allow any warning to be raised
        scheduler = StepLR(self.opt, gamma=0.1, step_size=3, last_epoch=10)
        self.assertTrue(len(ws) == 0, "No warning should be raised")

    # emulate use-case with optimizer.step overridden
    import types

    old_step = self.opt.step

    def new_step(o, *args, **kwargs):
        retval = old_step(*args, **kwargs)
        return retval

    self.opt.step = types.MethodType(new_step, self.opt)

    def old_pattern2():
        for _ in range(epochs):
            scheduler.step()
            self.opt.step()

    self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
new_step
def new_step(o, *args, **kwargs):
    retval = old_step(*args, **kwargs)
    return retval

self.opt.step = types.MethodType(new_step, self.opt)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
old_pattern2
def old_pattern2():
    for _ in range(epochs):
        scheduler.step()
        self.opt.step()

self.assertWarnsRegex(UserWarning, r"how-to-adjust-learning-rate", old_pattern2)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_linearlr
def test_linearlr(self):
    # lr = 0.025     if epoch == 0
    # lr = 0.03125   if epoch == 1
    # lr = 0.0375    if epoch == 2
    # lr = 0.04375   if epoch == 3
    # lr = 0.005     if 4 <= epoch
    epochs = 10
    start_factor = 1.0 / 2
    iters = 4
    interpolation = [
        start_factor + i * (1 - start_factor) / iters for i in range(iters)
    ]
    single_targets = [x * 0.05 for x in interpolation] + [0.05] * (epochs - iters)
    targets = [single_targets, [x * epochs for x in single_targets]]
    scheduler = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
    self._test(scheduler, targets, epochs)
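A short arithmetic check, derived directly from the test above (base lr 0.05), of the warm-up targets LinearLR is expected to produce:

# (1 - start_factor) / iters == 0.125, so the factor grows 0.5, 0.625, 0.75, 0.875, then 1.0
start_factor, iters, base_lr, epochs = 0.5, 4, 0.05, 10
expected = [
    base_lr * (start_factor + i * (1 - start_factor) / iters) for i in range(iters)
] + [base_lr] * (epochs - iters)
# -> 0.025, 0.03125, 0.0375, 0.04375, then 0.05 for the remaining epochs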
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_linearlr_start_factor_limits1
def test_linearlr_start_factor_limits1(self):
    start_factor = 0.0
    iters = 4
    with self.assertRaises(ValueError):
        LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_linearlr_start_factor_limits2
def test_linearlr_start_factor_limits2(self):
    start_factor = 1.1
    iters = 4
    with self.assertRaises(ValueError):
        LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_constantlr_with_epoch
def test_constantlr_with_epoch(self):
    # lr = 0.025     if epoch < 5
    # lr = 0.005     if 5 <= epoch
    epochs = 10
    single_targets = [0.025] * 5 + [0.05] * 5
    targets = [single_targets, [x * epochs for x in single_targets]]
    scheduler = ConstantLR(self.opt, factor=1.0 / 2, total_iters=5)
    self._test_with_epoch(scheduler, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_linearlr_with_epoch
def test_linearlr_with_epoch(self):
    # lr = 0.025     if epoch == 0
    # lr = 0.03125   if epoch == 1
    # lr = 0.0375    if epoch == 2
    # lr = 0.04375   if epoch == 3
    # lr = 0.005     if 4 <= epoch
    epochs = 10
    start_factor = 1.0 / 2
    end_factor = 1.0
    iters = 4
    interpolation = [
        start_factor + i * (end_factor - start_factor) / iters for i in range(iters)
    ]
    single_targets = [x * 0.05 for x in interpolation] + [0.05] * (epochs - iters)
    targets = [single_targets, [x * epochs for x in single_targets]]
    scheduler = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
    self._test_with_epoch(scheduler, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_exp_lr
def test_exp_lr(self):
    epochs = 10
    single_targets = [0.05 * (0.9**x) for x in range(epochs)]
    targets = [single_targets, [x * epochs for x in single_targets]]
    scheduler = ExponentialLR(self.opt, gamma=0.9)
    self._test(scheduler, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_poly_lr
def test_poly_lr(self):
    epochs = 10
    power = 0.9
    total_iters = 5
    single_targets = [
        (1.0 - x / total_iters) ** power * 0.05 for x in range(total_iters)
    ] + [0.0] * (epochs - total_iters)
    targets = [single_targets, [x * epochs for x in single_targets]]
    scheduler = PolynomialLR(self.opt, power=power, total_iters=total_iters)
    self._test(scheduler, targets, epochs)
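A small sketch (values rounded, base lr 0.05 as in the test above) of the decay PolynomialLR is expected to follow before reaching zero at total_iters:

power, total_iters, base_lr, epochs = 0.9, 5, 0.05, 10
expected = [
    (1.0 - x / total_iters) ** power * base_lr for x in range(total_iters)
] + [0.0] * (epochs - total_iters)
# roughly [0.05, 0.0409, 0.0316, 0.0219, 0.0117, 0.0, 0.0, ...]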
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cos_anneal_lr
def test_cos_anneal_lr(self):
    epochs = 10
    eta_min = 1e-10
    single_targets = [
        eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
        for x in range(epochs)
    ]
    targets = [single_targets, [x * epochs for x in single_targets]]
    scheduler = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
    self._test(scheduler, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_closed_form_step_lr
def test_closed_form_step_lr(self):
    scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
    closed_form_scheduler = StepLR(self.opt, gamma=0.1, step_size=3)
    self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_closed_form_linearlr
def test_closed_form_linearlr(self):
    scheduler = LinearLR(
        self.opt, start_factor=1.0 / 3, end_factor=0.7, total_iters=4
    )
    closed_form_scheduler = LinearLR(
        self.opt, start_factor=1.0 / 3, end_factor=0.7, total_iters=4
    )
    self._test_against_closed_form(scheduler, closed_form_scheduler, 20)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_linearlr_and_multistep_lr
def test_compound_linearlr_and_multistep_lr(self):
    epochs = 10
    iters = 4
    start_factor = 0.4
    schedulers = [None] * 2
    single_targets = [0.05] * 2 + [0.005] * 3 + [0.0005] * 4 + [0.00005] * 2
    for i in range(iters):
        single_targets[i] *= start_factor + i / iters * (1 - start_factor)
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers[0] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
    schedulers[1] = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
    self._test(schedulers, targets, epochs)
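For comparison, a hedged sketch of the same composition expressed with ChainedScheduler (already imported at the top of this file); this is not how the test helper drives the schedulers, just a closely related single-call form. The one-parameter optimizer is a placeholder.

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ChainedScheduler, LinearLR, MultiStepLR

opt = SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.05)
chained = ChainedScheduler(
    [
        MultiStepLR(opt, gamma=0.1, milestones=[2, 5, 9]),
        LinearLR(opt, start_factor=0.4, total_iters=4),
    ]
)
for _ in range(10):
    opt.step()
    chained.step()  # advances both schedulers; their factors compose multiplicatively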
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
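The compound tests above drive two schedulers on the same optimizer by stepping each of them once per epoch, so their multiplicative factors combine. A minimal sketch of that pattern outside the test harness, assuming a placeholder parameter and objective (only the scheduler constructors and their arguments are taken from the record above):

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LinearLR, MultiStepLR

# Hypothetical single-parameter "model", used only to give the optimizer something to update.
param = torch.nn.Parameter(torch.zeros(10))
optimizer = SGD([param], lr=0.05)

# Same combination as the deleted test: warmup-style LinearLR compounded with MultiStepLR.
warmup = LinearLR(optimizer, start_factor=0.4, total_iters=4)
decay = MultiStepLR(optimizer, gamma=0.1, milestones=[2, 5, 9])

for epoch in range(10):
    optimizer.zero_grad()
    loss = (param ** 2).sum()  # placeholder objective
    loss.backward()
    optimizer.step()
    # Stepping both schedulers each epoch multiplies their factors together,
    # which is what the expected `single_targets` values encode.
    warmup.step()
    decay.step()
    print(epoch, optimizer.param_groups[0]["lr"])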
torch
test/test_optim.py
test_compound_cosanneal_and_step_lr
def test_compound_cosanneal_and_step_lr(self):
    epochs = 10
    eta_min = 1e-10
    single_targets = [
        eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
        for x in range(epochs)
    ]
    single_targets = [x * 0.1 ** (i // 3) for i, x in enumerate(single_targets)]
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [None] * 2
    schedulers[0] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
    schedulers[1] = StepLR(self.opt, gamma=0.1, step_size=3)
    self._test(schedulers, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
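The expected values in the cosine-annealing tests come from the closed form eta_t = eta_min + (eta_max - eta_min) * (1 + cos(pi * t / T_max)) / 2, with eta_max = 0.05 and T_max = epochs. A short standalone check of that arithmetic, using only numbers that appear in the record above:

import math

epochs, eta_min, base_lr = 10, 1e-10, 0.05
cosine = [
    eta_min + (base_lr - eta_min) * (1 + math.cos(math.pi * t / epochs)) / 2
    for t in range(epochs)
]
# Compounding with StepLR(gamma=0.1, step_size=3) multiplies in an extra factor of 0.1
# every 3 epochs, which is the `0.1 ** (i // 3)` term in the expected targets.
compound = [lr * 0.1 ** (t // 3) for t, lr in enumerate(cosine)]
print(compound)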
torch
test/test_optim.py
test_compound_cosanneal_and_multistep_lr
def test_compound_cosanneal_and_multistep_lr(self):
    epochs = 10
    eta_min = 1e-10
    single_targets = [
        eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
        for x in range(epochs)
    ]
    multipliers = [1] * 2 + [0.1] * 3 + [0.01] * 4 + [0.001]
    single_targets = [x * y for x, y in zip(single_targets, multipliers)]
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [None] * 2
    schedulers[0] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
    schedulers[1] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
    self._test(schedulers, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_cosanneal_and_linearlr
def test_compound_cosanneal_and_linearlr(self):
    epochs = 10
    iters = 4
    start_factor = 0.4
    eta_min = 1e-10
    schedulers = [None] * 2
    single_targets = [
        eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
        for x in range(epochs)
    ]
    for i in range(iters):
        single_targets[i] *= start_factor + i / iters * (1 - start_factor)
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers[0] = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
    schedulers[1] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
    self._test(schedulers, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_cosanneal_and_exp_lr
def test_compound_cosanneal_and_exp_lr(self):
    epochs = 10
    eta_min = 1e-10
    single_targets = [
        eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
        for x in range(epochs)
    ]
    multipliers = [0.1**i for i in range(epochs)]
    single_targets = [x * y for x, y in zip(single_targets, multipliers)]
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers = [None] * 2
    schedulers[0] = CosineAnnealingLR(self.opt, T_max=epochs, eta_min=eta_min)
    schedulers[1] = ExponentialLR(self.opt, gamma=0.1)
    self._test(schedulers, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_reduce_lr_on_plateau1
def test_compound_reduce_lr_on_plateau1(self):
    epochs = 10
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    single_targets = [0.5] * 20
    multipliers = [0.1 ** (i // 3) for i in range(20)]
    single_targets = [x * y for x, y in zip(multipliers, single_targets)]
    targets = [single_targets]
    targets = targets[1:]  # test runs step before checking lr
    metrics = [10 - i * 0.0167 for i in range(20)]
    schedulers = [None, None]
    schedulers[0] = ReduceLROnPlateau(
        self.opt,
        threshold_mode="abs",
        mode="min",
        threshold=0.01,
        patience=5,
        cooldown=5,
    )
    schedulers[1] = StepLR(self.opt, gamma=0.1, step_size=3)
    self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
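Unlike the other schedulers in these records, ReduceLROnPlateau is stepped with a metric; the `_test_reduce_lr_on_plateau` helper feeds it the `metrics` list. A hedged sketch of how the two schedulers in this record would be driven together in ordinary training code, where the validation loss is a made-up placeholder:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR

param = torch.nn.Parameter(torch.zeros(3))
optimizer = SGD([param], lr=0.5)

plateau = ReduceLROnPlateau(
    optimizer, mode="min", threshold_mode="abs", threshold=0.01, patience=5, cooldown=5
)
step_decay = StepLR(optimizer, gamma=0.1, step_size=3)

for epoch in range(20):
    optimizer.step()  # training step elided; SGD skips parameters without gradients
    val_loss = 10 - epoch * 0.0167  # placeholder metric, mirrors the test's `metrics`
    # ReduceLROnPlateau reacts to the metric; StepLR decays on the epoch count alone.
    plateau.step(val_loss)
    step_decay.step()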
torch
test/test_optim.py
test_compound_reduce_lr_on_plateau2
def test_compound_reduce_lr_on_plateau2(self):
    epochs = 22
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    single_targets = [0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2
    multipliers = [1] * 3 + [0.1] * 5 + [0.01] * 4 + [0.001] * 10
    single_targets = [x * y for x, y in zip(single_targets, multipliers)]
    targets = [single_targets]
    targets = targets[1:]  # test runs step before checking lr
    metrics = [10 - i * 0.0165 for i in range(22)]
    schedulers = [None] * 2
    schedulers[0] = ReduceLROnPlateau(
        self.opt,
        patience=5,
        cooldown=0,
        threshold_mode="abs",
        mode="min",
        threshold=0.1,
    )
    schedulers[1] = MultiStepLR(self.opt, gamma=0.1, milestones=[3, 8, 12])
    self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_reduce_lr_on_plateau3
def test_compound_reduce_lr_on_plateau3(self):
    epochs = 22
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    single_targets = [0.5] * (2 + 6) + [0.05] * (5 + 6) + [0.005] * 4
    multipliers = [0.1**i for i in range(epochs)]
    single_targets = [x * y for x, y in zip(multipliers, single_targets)]
    targets = [single_targets]
    targets = targets[1:]  # test runs step before checking lr
    metrics = [-0.8] * 2 + [-0.234] * 20
    schedulers = [None, None]
    schedulers[0] = ReduceLROnPlateau(
        self.opt, mode="max", patience=5, cooldown=5, threshold_mode="abs"
    )
    schedulers[1] = ExponentialLR(self.opt, gamma=0.1)
    self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_reduce_lr_on_plateau4
def test_compound_reduce_lr_on_plateau4(self):
    epochs = 20
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.05
    epochs = 10
    eta_min = 1e-10
    single_targets = [
        eta_min + (0.05 - eta_min) * (1 + math.cos(math.pi * x / epochs)) / 2
        for x in range(epochs)
    ]
    targets = [single_targets]
    targets = targets[1:]  # test runs step before checking lr
    metrics = [1.5 * (1.025**i) for i in range(20)]  # 1.025 > 1.1**0.25
    schedulers = [None, None]
    schedulers[0] = ReduceLROnPlateau(
        self.opt, mode="max", patience=3, threshold_mode="rel", threshold=0.1
    )
    schedulers[1] = CosineAnnealingLR(self.opt, epochs, eta_min)
    self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_reduce_lr_on_plateau5
def test_compound_reduce_lr_on_plateau5(self):
    iters = 4
    start_factor = 0.4
    epochs = 22
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    single_targets = [0.5] * 6 + [0.05] * 7 + [0.005] * 7 + [0.0005] * 2
    multipliers = [1] * 22
    for i in range(iters):
        multipliers[i] *= start_factor + i / iters * (1 - start_factor)
    single_targets = [x * y for x, y in zip(single_targets, multipliers)]
    targets = [single_targets]
    targets = targets[1:]  # test runs step before checking lr
    metrics = [10 - i * 0.0165 for i in range(22)]
    schedulers = [None] * 2
    schedulers[0] = ReduceLROnPlateau(
        self.opt,
        patience=5,
        cooldown=0,
        threshold_mode="abs",
        mode="min",
        threshold=0.1,
    )
    schedulers[1] = LinearLR(self.opt, start_factor=start_factor, total_iters=iters)
    self._test_reduce_lr_on_plateau(schedulers, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_reduce_lr_on_plateau5
def test_reduce_lr_on_plateau5(self):
    epochs = 20
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    targets = [[0.5] * 6 + [0.05] * (5 + 6) + [0.005] * 4]
    metrics = [1.5 * (1.005**i) for i in range(20)]
    scheduler = ReduceLROnPlateau(
        self.opt,
        mode="max",
        threshold_mode="rel",
        threshold=0.1,
        patience=5,
        cooldown=5,
    )
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_reduce_lr_on_plateau6
def test_reduce_lr_on_plateau6(self):
    epochs = 20
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    targets = [[0.5] * 20]
    metrics = [1.5 * (0.85**i) for i in range(20)]
    scheduler = ReduceLROnPlateau(
        self.opt, mode="min", threshold_mode="rel", threshold=0.1
    )
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_reduce_lr_on_plateau7
def test_reduce_lr_on_plateau7(self):
    epochs = 20
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    targets = [[0.5] * 6 + [0.05] * (5 + 6) + [0.005] * 4]
    metrics = [1] * 7 + [0.6] + [0.5] * 12
    scheduler = ReduceLROnPlateau(
        self.opt,
        mode="min",
        threshold_mode="rel",
        threshold=0.1,
        patience=5,
        cooldown=5,
    )
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_reduce_lr_on_plateau8
def test_reduce_lr_on_plateau8(self):
    epochs = 20
    for param_group in self.opt.param_groups:
        param_group["lr"] = 0.5
    targets = [[0.5] * 6 + [0.4] * 14, [0.5] * 6 + [0.3] * 14]
    metrics = [1.5 * (1.005**i) for i in range(20)]
    scheduler = ReduceLROnPlateau(
        self.opt,
        mode="max",
        threshold_mode="rel",
        min_lr=[0.4, 0.3],
        threshold=0.1,
        patience=5,
        cooldown=5,
    )
    self._test_reduce_lr_on_plateau(scheduler, targets, metrics, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
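test_reduce_lr_on_plateau8 relies on `min_lr` accepting one floor per parameter group, so the two groups in the test bottom out at 0.4 and 0.3 respectively. A minimal sketch of that per-group floor, assuming two arbitrary placeholder parameter groups:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau

w1 = torch.nn.Parameter(torch.zeros(2))
w2 = torch.nn.Parameter(torch.zeros(2))
optimizer = SGD([{"params": [w1]}, {"params": [w2]}], lr=0.5)

# One lower bound per param group: the first group can only decay to 0.4, the second to 0.3.
scheduler = ReduceLROnPlateau(
    optimizer, mode="max", threshold_mode="rel", threshold=0.1,
    patience=5, cooldown=5, min_lr=[0.4, 0.3],
)

for epoch in range(20):
    metric = 1.5 * (1.005 ** epoch)  # slowly "improving" metric, as in the test's `metrics`
    scheduler.step(metric)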
torch
test/test_optim.py
test_sequentiallr1
def test_sequentiallr1(self):
    epochs = 19
    schedulers = [None] * 2
    targets = [
        [0.05, 0.04, 0.032]
        + [0.05 for x in range(4)]
        + [0.05 * 0.1 for x in range(4)]
        + [0.05 * 0.01 for x in range(4)]
        + [0.05 * 0.001 for x in range(4)]
    ]
    milestones = [3]
    schedulers[0] = ExponentialLR(self.opt, gamma=0.8)
    schedulers[1] = StepLR(self.opt, gamma=0.1, step_size=4)
    scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones)
    self._test(scheduler, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
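SequentialLR, unlike the compound pattern above, runs only one of its schedulers at a time and switches at the given milestones. A minimal sketch using the same constructors as the record above; the parameter and loop length are placeholders:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ExponentialLR, StepLR, SequentialLR

param = torch.nn.Parameter(torch.zeros(4))
optimizer = SGD([param], lr=0.05)

# ExponentialLR governs the first 3 epochs; after milestone 3, StepLR takes over.
scheduler = SequentialLR(
    optimizer,
    schedulers=[
        ExponentialLR(optimizer, gamma=0.8),
        StepLR(optimizer, gamma=0.1, step_size=4),
    ],
    milestones=[3],
)

for epoch in range(19):
    optimizer.step()  # training step elided; SGD skips parameters without gradients
    scheduler.step()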
torch
test/test_optim.py
test_sequentiallr2
def test_sequentiallr2(self):
    epochs = 13
    schedulers = [None] * 2
    targets = [[0.005, 0.005, 0.005] + [0.05 * 0.9**x for x in range(10)]]
    milestones = [3]
    schedulers[0] = ConstantLR(self.opt, factor=0.1, total_iters=3)
    schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
    scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones)
    self._test(scheduler, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_sequentiallr3
def test_sequentiallr3(self):
    epochs = 12
    schedulers = [None] * 3
    targets = [
        [0.005, 0.005, 0.005]
        + [0.05, 0.04, 0.032]
        + [0.05, 0.05, 0.005, 0.005, 0.0005, 0.0005]
    ]
    milestones = [3, 6]
    schedulers[0] = ConstantLR(self.opt, factor=0.1, total_iters=3)
    schedulers[1] = ExponentialLR(self.opt, gamma=0.8)
    schedulers[2] = StepLR(self.opt, gamma=0.1, step_size=2)
    scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones)
    self._test(scheduler, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_sequentiallr4
def test_sequentiallr4(self):
    optimizer = torch.optim.SGD([torch.tensor(0.5)], lr=0.1)
    prev_lr = optimizer.param_groups[0]["lr"]
    schedulers = [
        torch.optim.lr_scheduler.ConstantLR(optimizer, factor=1),
        torch.optim.lr_scheduler.ConstantLR(optimizer, factor=0.1),
    ]
    scheduler = torch.optim.lr_scheduler.SequentialLR(
        optimizer, schedulers, milestones=[10]
    )
    new_lr = optimizer.param_groups[0]["lr"]
    # Ensure that multiple schedulers does not affect the initial learning rate
    self.assertEqual(prev_lr, new_lr)
params = [
    Parameter(torch.randn(2, 3, device=device, dtype=dtype)) for _ in range(2)
]
for p in params:
    p.grad = torch.rand_like(p)
    if optim_info.only_supports_sparse_grads:
        # For this test, we naively convert the Tensor layout, which we know does
        # NOT represent the expected use case for optims like SparseAdam!
        p.grad = p.grad.to_sparse()
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_get_last_lr_sequentiallr
def test_get_last_lr_sequentiallr(self):
    epochs = 12
    milestones = [3, 6]
    schedulers = [None] * 3
    schedulers[0] = ConstantLR(self.opt, factor=0.1, total_iters=3)
    schedulers[1] = ExponentialLR(self.opt, gamma=0.8)
    schedulers[2] = StepLR(self.opt, gamma=0.1, step_size=2)
    scheduler = SequentialLR(self.opt, schedulers=schedulers, milestones=milestones)
    constant_lr_target = [0.005] * 3
    exponential_lr_target = [0.05, 0.04, 0.032]
    step_lr_target = [0.05, 0.05, 0.005, 0.005, 0.0005, 0.0005]
    single_targets = constant_lr_target + exponential_lr_target + step_lr_target
    targets = [single_targets, [x * 10 for x in single_targets]]
    self._test_get_last_lr(scheduler, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
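get_last_lr() returns one value per parameter group, reflecting the learning rate most recently set by scheduler.step(); the test above compares those values against hand-built targets. A self-contained sketch that mirrors the same SequentialLR setup with a single placeholder parameter group:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import ConstantLR, ExponentialLR, StepLR, SequentialLR

param = torch.nn.Parameter(torch.zeros(2))
optimizer = SGD([param], lr=0.05)
scheduler = SequentialLR(
    optimizer,
    schedulers=[
        ConstantLR(optimizer, factor=0.1, total_iters=3),
        ExponentialLR(optimizer, gamma=0.8),
        StepLR(optimizer, gamma=0.1, step_size=2),
    ],
    milestones=[3, 6],
)

for epoch in range(12):
    optimizer.step()  # training step elided
    scheduler.step()
    # One entry per parameter group; here there is only one group.
    print(epoch, scheduler.get_last_lr())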
torch
test/test_optim.py
test_chained_lr2_get_last_lr_before_step
def test_chained_lr2_get_last_lr_before_step(self):
    schedulers = [
        LinearLR(self.opt, start_factor=0.4, total_iters=3),
        MultiStepLR(self.opt, milestones=[4, 8, 10], gamma=0.1),
    ]
    scheduler = ChainedScheduler(schedulers)
    self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_chained_lr1
def test_chained_lr1(self):
    epochs = 10
    schedulers = [None] * 1
    targets = [[0.05] * 3 + [0.005] * 3 + [0.0005] * 3 + [0.00005] * 3]
    schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
    scheduler = ChainedScheduler(schedulers)
    self._test([scheduler], targets, epochs)
    self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
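ChainedScheduler wraps a list of schedulers so that a single step() call advances all of them, which is why its expected targets match the compound tests where each scheduler is stepped individually. A minimal sketch with placeholder parameter and epoch count, reusing constructors from the chained tests in these records:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LinearLR, MultiStepLR, ChainedScheduler

param = torch.nn.Parameter(torch.zeros(2))
optimizer = SGD([param], lr=0.05)

# One step() call advances every wrapped scheduler, so their factors multiply.
scheduler = ChainedScheduler([
    LinearLR(optimizer, start_factor=0.4, total_iters=3),
    MultiStepLR(optimizer, milestones=[4, 8, 10], gamma=0.1),
])

for epoch in range(10):
    optimizer.step()  # training step elided
    scheduler.step()
    print(epoch, scheduler.get_last_lr())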
torch
test/test_optim.py
test_chained_lr2
def test_chained_lr2(self):
    epochs = 10
    schedulers = [None] * 1
    targets = [[0.02, 0.03, 0.04] + [0.05] * 9]
    schedulers[0] = LinearLR(self.opt, start_factor=0.4, total_iters=3)
    scheduler = ChainedScheduler(schedulers)
    self._test([scheduler], targets, epochs)
    self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_chained_lr3
def test_chained_lr3(self):
    epochs = 10
    schedulers = [None] * 2
    targets = [
        [0.02, 0.03, 0.04, 0.05] + [0.005] * 4 + [0.0005] * 3 + [0.00005] * 3
    ]
    schedulers[0] = LinearLR(self.opt, start_factor=0.4, total_iters=3)
    schedulers[1] = MultiStepLR(self.opt, milestones=[4, 8, 10], gamma=0.1)
    scheduler = ChainedScheduler(schedulers)
    self._test([scheduler], targets, epochs)
    self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_chained_lr4
def test_chained_lr4(self):
    epochs = 9
    schedulers = [None] * 3
    targets = [
        [0.05 * 0.2 * 0.9**x for x in range(3)]
        + [0.05 * 0.2 * 0.9**3 * 0.1]
        + [0.05 * 0.9**x * 0.1 for x in range(4, 6)]
        + [0.05 * 0.9**x * 0.01 for x in range(6, 9)]
    ]
    schedulers[0] = ExponentialLR(self.opt, gamma=0.9)
    schedulers[1] = ConstantLR(self.opt, factor=0.2, total_iters=4)
    schedulers[2] = StepLR(self.opt, gamma=0.1, step_size=3)
    scheduler = ChainedScheduler(schedulers)
    self._test([scheduler], targets, epochs)
    self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
poly_lr
def poly_lr(lr: float):
    return [
        (lr * ((1.0 - x / total_iters) ** power)) for x in range(total_iters)
    ] + [0.0] * (epochs - total_iters)

schedulers = [None] * 2
epochs = 10
power = 0.9
total_iters = 5
const_factor = 0.1
single_targets = [x * const_factor for x in poly_lr(lr=0.05)]
targets = [single_targets, [x * const_factor for x in poly_lr(0.5)]]
schedulers[0] = PolynomialLR(self.opt, power=power, total_iters=total_iters)
schedulers[1] = ConstantLR(self.opt, factor=const_factor)
scheduler = ChainedScheduler(schedulers)
self._test(scheduler, targets, epochs)
self.assertEqual(scheduler.get_last_lr(), schedulers[-1].get_last_lr())
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
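The poly_lr helper in the record above spells out what PolynomialLR is expected to produce: for epoch t < total_iters the rate is lr * (1 - t / total_iters) ** power, and 0 afterwards. A small standalone sketch of that scheduler on its own, with a placeholder parameter and the same numbers as the record:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import PolynomialLR

param = torch.nn.Parameter(torch.zeros(2))
optimizer = SGD([param], lr=0.05)
scheduler = PolynomialLR(optimizer, total_iters=5, power=0.9)

for epoch in range(10):
    optimizer.step()  # training step elided
    scheduler.step()
    # Decays toward 0 over the first 5 epochs, then stays at 0.
    print(epoch, scheduler.get_last_lr())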
torch
test/test_optim.py
test_compound_step_and_multistep_lr
def test_compound_step_and_multistep_lr(self):
    epochs = 10
    schedulers = [None] * 2
    schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
    schedulers[1] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
    targets = [[0.05] * 2 + [0.005] * 1 + [5e-4] * 2 + [5e-5] + [5e-6] * 3 + [5e-8]]
    self._test(schedulers, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_step_and_exp_lr
def test_compound_step_and_exp_lr(self):
    epochs = 10
    schedulers = [None] * 2
    single_targets = [0.05 * (0.9**x) for x in range(3)]
    single_targets += [0.005 * (0.9**x) for x in range(3, 6)]
    single_targets += [0.0005 * (0.9**x) for x in range(6, 9)]
    single_targets += [0.00005 * (0.9**x) for x in range(9, 12)]
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
    schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
    self._test(schedulers, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_exp_and_multistep_lr
def test_compound_exp_and_multistep_lr(self):
    epochs = 10
    schedulers = [None] * 2
    single_targets = [0.05 * (0.9**x) for x in range(2)]
    single_targets += [0.005 * (0.9**x) for x in range(2, 5)]
    single_targets += [0.0005 * (0.9**x) for x in range(5, 9)]
    single_targets += [0.00005 * (0.9**x) for x in range(9, 11)]
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers[0] = MultiStepLR(self.opt, gamma=0.1, milestones=[2, 5, 9])
    schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
    self._test(schedulers, targets, epochs)
import warnings import math import unittest import functools import itertools import pickle from copy import deepcopy import weakref import torch import torch.optim as optim import torch.nn.functional as F from torch.nn import Parameter from torch.optim import Adam, SGD, Optimizer from torch import sparse from torch.optim.lr_scheduler import ( LambdaLR, MultiplicativeLR, SequentialLR, StepLR, MultiStepLR, ConstantLR, LinearLR, ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, LRScheduler, CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR, ChainedScheduler, PolynomialLR, EPOCH_DEPRECATION_WARNING, ) from torch.optim.swa_utils import AveragedModel, SWALR, update_bn from torch.testing._internal.common_utils import ( TestCase, run_tests, TEST_WITH_UBSAN, load_tests, parametrize, instantiate_parametrized_tests, gradcheck, skipIfRocm, skipIfTorchDynamo ) from torch.testing._internal.common_cuda import TEST_MULTIGPU, TEST_CUDA from typing import Dict, Any, Tuple from torch.optim.optimizer import register_optimizer_step_pre_hook, register_optimizer_step_post_hook from unittest.mock import patch load_tests = load_tests from torch.optim import adam, adamw class TestLRScheduler(TestCase): import gc import gc import weakref import types import types from torch.nn import Parameter import gc import weakref from torch.optim import (adam, adamw, nadam, sgd, radam, rmsprop, rprop, asgd, adamax, adadelta, adagrad)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_exp_and_linearlr
def test_compound_exp_and_linearlr(self):
    epochs = 10
    iters = 4
    start_factor = 0.4
    end_factor = 0.9
    schedulers = [None] * 2
    single_targets = [0.05 * (0.9**x) for x in range(11)]
    for i in range(iters):
        single_targets[i] *= start_factor + i / iters * (end_factor - start_factor)
    for i in range(iters, 11):
        single_targets[i] *= end_factor
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers[0] = LinearLR(
        self.opt,
        start_factor=start_factor,
        end_factor=end_factor,
        total_iters=iters,
    )
    schedulers[1] = ExponentialLR(self.opt, gamma=0.9)
    self._test(schedulers, targets, epochs)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_compound_step_and_constantlr
def test_compound_step_and_constantlr(self):
    epochs = 10
    iters = 4
    factor = 0.4
    schedulers = [None] * 2
    single_targets = (
        [0.05 * 0.4] * 3
        + [0.005 * 0.4]
        + [0.005] * 2
        + [0.0005] * 3
        + [0.00005] * 3
    )
    targets = [single_targets, [x * epochs for x in single_targets]]
    schedulers[0] = StepLR(self.opt, gamma=0.1, step_size=3)
    schedulers[1] = ConstantLR(self.opt, factor=0.4, total_iters=4)
    self._test(schedulers, targets, epochs)
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_invalid_mode
def test_cycle_lr_invalid_mode(self):
    with self.assertRaises(ValueError):
        scheduler = CyclicLR(self.opt, base_lr=0, max_lr=0, mode="CATS")
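For reference, CyclicLR only accepts the three built-in policies ("triangular", "triangular2", "exp_range") unless a custom scale_fn is supplied; anything else is rejected at construction time. A minimal sketch with an assumed stand-alone SGD optimizer (not the test fixture):

    import torch
    from torch.optim import SGD
    from torch.optim.lr_scheduler import CyclicLR

    opt = SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1, momentum=0.9)

    for mode in ("triangular", "triangular2", "exp_range"):
        CyclicLR(opt, base_lr=0.001, max_lr=0.01, mode=mode)  # all accepted

    try:
        CyclicLR(opt, base_lr=0.001, max_lr=0.01, mode="CATS")
    except ValueError as exc:
        print("rejected:", exc)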
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_triangular_mode_one_lr
def test_cycle_lr_triangular_mode_one_lr(self):
    # Triangular policy: LR ramps linearly 1 -> 5 -> 1 over an 8-step cycle
    # while momentum mirrors it in the opposite direction.
    lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
    momentum_target = [5, 4, 3, 2, 1, 2, 3, 4, 5, 4, 3]
    lr_targets = [lr_target, lr_target]
    momentum_targets = [momentum_target, momentum_target]
    scheduler = CyclicLR(
        self.opt,
        base_lr=1,
        max_lr=5,
        step_size_up=4,
        cycle_momentum=True,
        base_momentum=1,
        max_momentum=5,
        mode="triangular",
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
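The expected ladder follows directly from the triangular policy: a linear ramp from base_lr to max_lr over step_size_up steps and back down. A small sketch of how such a target sequence can be generated (the helper name is ours, not part of the test suite):

    def triangular_targets(base_lr, max_lr, step_size_up, n_steps):
        """Linear ramp base_lr -> max_lr -> base_lr, repeating every 2*step_size_up steps."""
        targets = []
        for i in range(n_steps):
            cycle_pos = i % (2 * step_size_up)
            if cycle_pos <= step_size_up:
                x = cycle_pos / step_size_up
            else:
                x = (2 * step_size_up - cycle_pos) / step_size_up
            targets.append(base_lr + (max_lr - base_lr) * x)
        return targets

    assert triangular_targets(1, 5, 4, 11) == [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]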
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_triangular_mode_one_lr_no_momentum
def test_cycle_lr_triangular_mode_one_lr_no_momentum(self):
    lr_target = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
    lr_targets = [lr_target, lr_target]
    momentum_target = [self.opt.defaults["momentum"]] * len(lr_target)
    momentum_targets = [momentum_target, momentum_target]
    scheduler = CyclicLR(
        self.opt,
        base_lr=1,
        max_lr=5,
        step_size_up=4,
        cycle_momentum=False,
        mode="triangular",
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
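With cycle_momentum=False the scheduler leaves the optimizer's momentum untouched, which is why the expected momentum trace is just the optimizer default repeated. A minimal illustration, assuming a stand-alone SGD optimizer with momentum=0.9 in place of the fixture:

    import torch
    from torch.optim import SGD
    from torch.optim.lr_scheduler import CyclicLR

    opt = SGD([torch.nn.Parameter(torch.zeros(1))], lr=1.0, momentum=0.9)
    sched = CyclicLR(opt, base_lr=1, max_lr=5, step_size_up=4,
                     cycle_momentum=False, mode="triangular")

    for _ in range(5):
        opt.step()
        sched.step()
        assert opt.param_groups[0]["momentum"] == 0.9  # never modified by the scheduler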
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_triangular2_mode_one_lr
def test_cycle_lr_triangular2_mode_one_lr(self):
    lr_target = [
        1, 2, 3, 4, 5, 4, 3, 2,
        1, 1.5, 2.0, 2.5, 3.0, 2.5, 2.0, 1.5,
        1, 1.25, 1.50, 1.75, 2.00, 1.75,
    ]
    momentum_target = [
        5.0, 4.0, 3.0, 2.0, 1.0, 2.0, 3.0, 4.0,
        5.0, 4.5, 4.0, 3.5, 3.0, 3.5, 4.0, 4.5,
        5.0, 4.75, 4.5, 4.25, 4.0, 4.25,
    ]
    lr_targets = [lr_target, lr_target]
    momentum_targets = [momentum_target, momentum_target]
    scheduler = CyclicLR(
        self.opt,
        base_lr=1,
        max_lr=5,
        step_size_up=4,
        cycle_momentum=True,
        base_momentum=1,
        max_momentum=5,
        mode="triangular2",
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
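triangular2 differs from triangular only in that the peak-to-base amplitude is halved after every completed cycle, which is what produces the 5 -> 3 -> 2 sequence of peaks in the target list above. A sketch of that closed form (helper name and constants are ours, mirroring the test's 1/5/4 settings):

    def triangular2_lr(step, base_lr, max_lr, step_size_up):
        cycle = step // (2 * step_size_up)      # completed full cycles
        cycle_pos = step % (2 * step_size_up)
        x = cycle_pos / step_size_up
        x = x if x <= 1 else 2 - x              # ramp up, then down
        return base_lr + (max_lr - base_lr) * x / (2 ** cycle)

    print([triangular2_lr(s, 1, 5, 4) for s in range(12)])
    # [1.0, 2.0, 3.0, 4.0, 5.0, 4.0, 3.0, 2.0, 1.0, 1.5, 2.0, 2.5]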
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_exp_range_mode_one_lr
def test_cycle_lr_exp_range_mode_one_lr(self):
    base_lr, max_lr = 1, 5
    diff_lr = max_lr - base_lr
    gamma = 0.9
    xs = [0, 0.25, 0.5, 0.75, 1, 0.75, 0.50, 0.25, 0, 0.25, 0.5, 0.75, 1]
    lr_target = [base_lr + x * diff_lr * gamma**i for i, x in enumerate(xs)]
    momentum_target = [max_lr - x * diff_lr * gamma**i for i, x in enumerate(xs)]
    lr_targets = [lr_target, lr_target]
    momentum_targets = [momentum_target, momentum_target]
    scheduler = CyclicLR(
        self.opt,
        base_lr=base_lr,
        max_lr=max_lr,
        step_size_up=4,
        cycle_momentum=True,
        base_momentum=base_lr,
        max_momentum=max_lr,
        mode="exp_range",
        gamma=gamma,
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
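In exp_range mode the triangular envelope is additionally scaled by gamma**iteration (per batch, not per cycle), which is exactly the closed form the test uses to build its targets: lr_i = base_lr + x_i * (max_lr - base_lr) * gamma**i. A sketch under the same assumed constants:

    base_lr, max_lr, gamma, step_size_up = 1, 5, 0.9, 4

    def exp_range_lr(i):
        cycle_pos = i % (2 * step_size_up)
        x = cycle_pos / step_size_up
        x = x if x <= 1 else 2 - x              # triangular envelope
        return base_lr + x * (max_lr - base_lr) * gamma ** i

    print([round(exp_range_lr(i), 4) for i in range(9)])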
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_triangular_mode
def test_cycle_lr_triangular_mode(self):
    lr_target_1 = [1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3]
    lr_target_2 = [x + 1 for x in lr_target_1]
    lr_targets = [lr_target_1, lr_target_2]
    momentum_target_1 = [5, 4, 3, 2, 1, 2, 3, 4, 5, 4, 3]
    momentum_target_2 = [x + 1 for x in momentum_target_1]
    momentum_targets = [momentum_target_1, momentum_target_2]
    scheduler = CyclicLR(
        self.opt,
        base_lr=[1, 2],
        max_lr=[5, 6],
        step_size_up=4,
        cycle_momentum=True,
        base_momentum=[1, 2],
        max_momentum=[5, 6],
        mode="triangular",
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target_1))
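When base_lr / max_lr (and the momentum bounds) are given as lists, CyclicLR applies one entry per parameter group, so the second group's trace is simply the first shifted by one. A sketch with two illustrative parameter groups (not the test fixture's optimizer):

    import torch
    from torch.optim import SGD
    from torch.optim.lr_scheduler import CyclicLR

    p1, p2 = (torch.nn.Parameter(torch.zeros(1)) for _ in range(2))
    opt = SGD([{"params": [p1]}, {"params": [p2]}], lr=1.0, momentum=0.9)

    sched = CyclicLR(opt, base_lr=[1, 2], max_lr=[5, 6], step_size_up=4,
                     cycle_momentum=True, base_momentum=[1, 2], max_momentum=[5, 6],
                     mode="triangular")

    for _ in range(6):
        print([group["lr"] for group in opt.param_groups])  # second group trails by +1
        opt.step()
        sched.step()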
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_triangular2_mode
def test_cycle_lr_triangular2_mode(self):
    lr_target_1 = [
        1, 2, 3, 4, 5, 4, 3, 2,
        1, 1.5, 2.0, 2.5, 3.0, 2.5, 2.0, 1.5,
        1, 1.25, 1.50, 1.75, 2.00, 1.75,
    ]
    lr_target_2 = [x + 2 for x in lr_target_1]
    lr_targets = [lr_target_1, lr_target_2]
    momentum_target_1 = [
        5.0, 4.0, 3.0, 2.0, 1.0, 2.0, 3.0, 4.0,
        5.0, 4.5, 4.0, 3.5, 3.0, 3.5, 4.0, 4.5,
        5.0, 4.75, 4.5, 4.25, 4.0, 4.25,
    ]
    momentum_target_2 = [x + 2 for x in momentum_target_1]
    momentum_targets = [momentum_target_1, momentum_target_2]
    scheduler = CyclicLR(
        self.opt,
        base_lr=[1, 3],
        max_lr=[5, 7],
        step_size_up=4,
        cycle_momentum=True,
        base_momentum=[1, 3],
        max_momentum=[5, 7],
        mode="triangular2",
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target_1))
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_exp_range_mode
def test_cycle_lr_exp_range_mode(self):
    base_lr_1, max_lr_1 = 1, 5
    base_lr_2, max_lr_2 = 5, 12
    diff_lr_1 = max_lr_1 - base_lr_1
    diff_lr_2 = max_lr_2 - base_lr_2
    gamma = 0.9
    xs = [0, 0.25, 0.5, 0.75, 1, 0.75, 0.50, 0.25, 0, 0.25, 0.5, 0.75, 1]
    lr_target_1 = [base_lr_1 + x * diff_lr_1 * gamma**i for i, x in enumerate(xs)]
    lr_target_2 = [base_lr_2 + x * diff_lr_2 * gamma**i for i, x in enumerate(xs)]
    lr_targets = [lr_target_1, lr_target_2]
    momentum_target_1 = [
        max_lr_1 - x * diff_lr_1 * gamma**i for i, x in enumerate(xs)
    ]
    momentum_target_2 = [
        max_lr_2 - x * diff_lr_2 * gamma**i for i, x in enumerate(xs)
    ]
    momentum_targets = [momentum_target_1, momentum_target_2]
    scheduler = CyclicLR(
        self.opt,
        base_lr=[base_lr_1, base_lr_2],
        max_lr=[max_lr_1, max_lr_2],
        step_size_up=4,
        cycle_momentum=True,
        base_momentum=[base_lr_1, base_lr_2],
        max_momentum=[max_lr_1, max_lr_2],
        mode="exp_range",
        gamma=gamma,
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target_1))
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_triangular_mode_step_size_up_down
def test_cycle_lr_triangular_mode_step_size_up_down(self):
    lr_target = [
        1.0, 2.0, 3.0, 4.0, 5.0,
        13.0 / 3, 11.0 / 3, 9.0 / 3, 7.0 / 3, 5.0 / 3,
        1.0,
    ]
    lr_targets = [lr_target, lr_target]
    momentum_target = [
        5.0, 4.0, 3.0, 2.0, 1.0,
        5.0 / 3, 7.0 / 3, 3.0, 11.0 / 3, 13.0 / 3,
        5.0,
    ]
    momentum_targets = [momentum_target, momentum_target]
    scheduler = CyclicLR(
        self.opt,
        base_lr=1,
        max_lr=5,
        step_size_up=4,
        step_size_down=6,
        cycle_momentum=True,
        base_momentum=1,
        max_momentum=5,
        mode="triangular",
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
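With different step_size_up and step_size_down the cycle is no longer symmetric: the ramp up still takes step_size_up steps, but the descent is spread over step_size_down steps, which is where the thirds (13/3, 11/3, ...) in the target list come from. A sketch of that expectation using exact rationals (helper name is ours):

    from fractions import Fraction

    def asymmetric_triangular(base_lr, max_lr, up, down, n_steps):
        total = up + down
        out = []
        for i in range(n_steps):
            pos = i % total
            if pos <= up:
                x = Fraction(pos, up)
            else:
                x = Fraction(total - pos, down)
            out.append(base_lr + (max_lr - base_lr) * x)
        return out

    print(asymmetric_triangular(1, 5, 4, 6, 11))
    # 1, 2, 3, 4, 5, 13/3, 11/3, 3, 7/3, 5/3, 1  (as Fractions)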
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_triangular2_mode_step_size_up_down
def test_cycle_lr_triangular2_mode_step_size_up_down(self):
    lr_base_target = [
        1.0, 3.0, 5.0, 13.0 / 3, 11.0 / 3, 9.0 / 3, 7.0 / 3, 5.0 / 3,
        1.0, 2.0, 3.0, 8.0 / 3, 7.0 / 3, 6.0 / 3, 5.0 / 3, 4.0 / 3,
        1.0, 3.0 / 2, 2.0, 11.0 / 6, 10.0 / 6, 9.0 / 6, 8.0 / 6, 7.0 / 6,
    ]
    momentum_base_target = [
        5.0, 3.0, 1.0, 5.0 / 3, 7.0 / 3, 3.0, 11.0 / 3, 13.0 / 3,
        5.0, 4.0, 3.0, 10.0 / 3, 11.0 / 3, 4.0, 13.0 / 3, 14.0 / 3,
        5.0, 4.5, 4.0, 25.0 / 6, 13.0 / 3, 4.5, 14.0 / 3, 29.0 / 6,
    ]
    deltas = [2 * i for i in range(0, 2)]
    base_lrs = [1 + delta for delta in deltas]
    max_lrs = [5 + delta for delta in deltas]
    lr_targets = [[x + delta for x in lr_base_target] for delta in deltas]
    momentum_targets = [
        [x + delta for x in momentum_base_target] for delta in deltas
    ]
    scheduler = CyclicLR(
        self.opt,
        base_lr=base_lrs,
        max_lr=max_lrs,
        step_size_up=2,
        step_size_down=6,
        cycle_momentum=True,
        base_momentum=base_lrs,
        max_momentum=max_lrs,
        mode="triangular2",
    )
    self._test_cycle_lr(
        scheduler, lr_targets, momentum_targets, len(lr_base_target)
    )
# Needed for second order optims like LBFGS
closure_loss = torch.rand(1, device=device, dtype=dtype)
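The fragment above comes from a helper that also exercises closure-based optimizers; for context, LBFGS is the optimizer in torch.optim whose step() requires a closure that re-evaluates the loss (other optimizers only accept one optionally). A minimal, self-contained sketch with an illustrative parameter and loss:

    import torch

    param = torch.nn.Parameter(torch.randn(2))
    opt = torch.optim.LBFGS([param], lr=0.1)

    def closure():
        # LBFGS may call this several times per step(), so it must
        # zero the gradients and recompute the loss each time.
        opt.zero_grad()
        loss = (param ** 2).sum()
        loss.backward()
        return loss

    for _ in range(3):
        opt.step(closure)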
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/test_optim.py
test_cycle_lr_exp_range_mode_step_size_up_down
def test_cycle_lr_exp_range_mode_step_size_up_down(self):
    base_lr, max_lr = 1, 5
    diff_lr = max_lr - base_lr
    gamma = 0.9
    xs = [
        0.0, 0.5, 1.0, 5.0 / 6, 4.0 / 6, 3.0 / 6, 2.0 / 6, 1.0 / 6,
        0.0, 0.5, 1.0, 5.0 / 6, 4.0 / 6,
    ]
    lr_target = [base_lr + x * diff_lr * gamma**i for i, x in enumerate(xs)]
    lr_targets = [lr_target, lr_target]
    momentum_target = [max_lr - x * diff_lr * gamma**i for i, x in enumerate(xs)]
    momentum_targets = [momentum_target, momentum_target]
    scheduler = CyclicLR(
        self.opt,
        base_lr=base_lr,
        max_lr=max_lr,
        step_size_up=2,
        step_size_down=6,
        cycle_momentum=True,
        base_momentum=base_lr,
        max_momentum=max_lr,
        mode="exp_range",
        gamma=gamma,
    )
    self._test_cycle_lr(scheduler, lr_targets, momentum_targets, len(lr_target))
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted