version (string, 21 classes) | code (string, 225-174k chars) | apis (sequence) | full_version (string, 1-6 chars) | repo_name (string, 10-107 chars) | hexsha (string, 40 chars)
---|---|---|---|---|---
1.2 | import os
from torch.utils.data import DataLoader
from continuum.datasets import CIFAR10, InMemoryDataset
from continuum.datasets import MNIST
import torchvision
from continuum.scenarios import TransformationIncremental
import pytest
import numpy as np
from continuum.transforms.bg_swap import BackgroundSwap
DATA_PATH = os.environ.get("CONTINUUM_DATA_PATH")
# Uncomment for debugging via image output
# import matplotlib.pyplot as plt
def test_bg_swap_fast():
"""
Fast test for background swap.
"""
bg_x = np.ones(shape=[2, 5, 5, 3]) * -1
bg_y = np.random.rand(2)
fg = np.random.normal(loc=.5, scale=.1, size=[5, 5])
bg = InMemoryDataset(bg_x, bg_y)
bg_swap = BackgroundSwap(bg, input_dim=(5, 5), normalize_bg=None)
spliced_1_channel = bg_swap(fg)[:, :, 0]
assert np.array_equal((spliced_1_channel <= -1), (fg <= .5))
@pytest.mark.slow
def test_background_swap_numpy():
"""
Test background swap on a single ndarray input.
"""
mnist = MNIST(DATA_PATH, download=True, train=True)
cifar = CIFAR10(DATA_PATH, download=True, train=True)
bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))
im = mnist.get_data()[0][0]
im = bg_swap(im)
# Uncomment for debugging
# plt.imshow(im, interpolation='nearest')
# plt.show()
@pytest.mark.slow
def test_background_swap_torch():
"""
Test background swap on a single tensor input.
"""
cifar = CIFAR10(DATA_PATH, download=True, train=True)
mnist = torchvision.datasets.MNIST(DATA_PATH, train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor()
]))
bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))
im = mnist[0][0]
im = bg_swap(im)
# Uncomment for debugging
# plt.imshow(im.permute(1, 2, 0), interpolation='nearest')
# plt.show()
@pytest.mark.slow
def test_background_tranformation():
"""
Example code using TransformationIncremental to create a setting with 3 tasks.
"""
cifar = CIFAR10(DATA_PATH, train=True)
mnist = MNIST(DATA_PATH, download=False, train=True)
nb_task = 3
list_trsf = []
for i in range(nb_task):
list_trsf.append([torchvision.transforms.ToTensor(), BackgroundSwap(cifar, bg_label=i, input_dim=(28, 28)),
torchvision.transforms.ToPILImage()])
scenario = TransformationIncremental(mnist, base_transformations=[torchvision.transforms.ToTensor()],
incremental_transformations=list_trsf)
folder = "tests/samples/background_trsf/"
if not os.path.exists(folder):
os.makedirs(folder)
for task_id, task_data in enumerate(scenario):
task_data.plot(path=folder, title=f"background_{task_id}.jpg", nb_samples=100, shape=[28, 28, 3])
loader = DataLoader(task_data)
_, _, _ = next(iter(loader))
| [
"torch.utils.data.DataLoader"
] | 1.2.0 | pclucas14/continuum | 09034db1371e9646ca660fd4d4df73e61bf77067 |
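For orientation, a minimal usage sketch (not part of the dataset row) that mirrors the fast test above, driving `BackgroundSwap` on an in-memory background set; the 5x5 sizes and the random foreground are arbitrary assumptions.

```python
import numpy as np
from continuum.datasets import InMemoryDataset
from continuum.transforms.bg_swap import BackgroundSwap

# Background set: two 5x5x3 images filled with -1, with random labels
bg = InMemoryDataset(np.ones([2, 5, 5, 3]) * -1, np.random.rand(2))
swap = BackgroundSwap(bg, input_dim=(5, 5), normalize_bg=None)

# Apply the swap to a single 5x5 foreground image
out = swap(np.random.normal(loc=.5, scale=.1, size=[5, 5]))
print(out.shape)
```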
1.8 | """Timer class based on the timeit.Timer class, but torch aware."""
import enum
import timeit
import textwrap
from typing import Any, Callable, Dict, List, NoReturn, Optional, Type, Union
import numpy as np
import torch
from torch.utils.benchmark.utils import common, cpp_jit
from torch.utils.benchmark.utils._stubs import TimerClass, TimeitModuleType
from torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface
__all__ = ["Timer", "timer", "Language"]
if torch.has_cuda and torch.cuda.is_available():
def timer() -> float:
torch.cuda.synchronize()
return timeit.default_timer()
else:
timer = timeit.default_timer
class Language(enum.Enum):
PYTHON = 0
CPP = 1
class CPPTimer:
def __init__(
self,
stmt: str,
setup: str,
timer: Callable[[], float],
globals: Dict[str, Any],
) -> None:
if timer is not timeit.default_timer:
raise NotImplementedError(
"PyTorch was built with CUDA and a GPU is present; however "
"Timer does not yet support GPU measurements. If your "
"code is CPU only, pass `timer=timeit.default_timer` to the "
"Timer's constructor to indicate this. (Note that this will "
"produce incorrect results if the GPU is in fact used, as "
"Timer will not synchronize CUDA.)"
)
if globals:
raise ValueError("C++ timing does not support globals.")
self._stmt: str = textwrap.dedent(stmt)
self._setup: str = textwrap.dedent(setup)
self._timeit_module: Optional[TimeitModuleType] = None
def timeit(self, number: int) -> float:
if self._timeit_module is None:
self._timeit_module = cpp_jit.compile_timeit_template(
self._stmt,
self._setup,
)
return self._timeit_module.timeit(number)
class Timer(object):
"""Helper class for measuring execution time of PyTorch statements.
For a full tutorial on how to use this class, see:
https://pytorch.org/tutorials/recipes/recipes/benchmark.html
The PyTorch Timer is based on `timeit.Timer` (and in fact uses
`timeit.Timer` internally), but with several key differences:
1) Runtime aware:
Timer will perform warmups (important as some elements of PyTorch are
lazily initialized), set threadpool size so that comparisons are
apples-to-apples, and synchronize asynchronous CUDA functions when
necessary.
2) Focus on replicates:
When measuring code, and particularly complex kernels / models,
run-to-run variation is a significant confounding factor. It is
expected that all measurements should include replicates to quantify
noise and allow median computation, which is more robust than mean.
To that effect, this class deviates from the `timeit` API by
conceptually merging `timeit.Timer.repeat` and `timeit.Timer.autorange`.
(Exact algorithms are discussed in method docstrings.) The `timeit`
method is replicated for cases where an adaptive strategy is not
desired.
3) Optional metadata:
When defining a Timer, one can optionally specify `label`, `sub_label`,
`description`, and `env`. (Defined later.) These fields are included in
the representation of the result object and used by the `Compare` class to group
and display results for comparison.
4) Instruction counts
In addition to wall times, Timer can run a statement under Callgrind
and report instructions executed.
Directly analogous to `timeit.Timer` constructor arguments:
`stmt`, `setup`, `timer`, `globals`
PyTorch Timer specific constructor arguments:
`label`, `sub_label`, `description`, `env`, `num_threads`
Args:
stmt: Code snippet to be run in a loop and timed.
setup: Optional setup code. Used to define variables used in `stmt`
timer:
Callable which returns the current time. If PyTorch was built
without CUDA or there is no GPU present, this defaults to
`timeit.default_timer`; otherwise it will synchronize CUDA before
measuring the time.
globals:
A dict which defines the global variables when `stmt` is being
executed. This is the other method for providing variables which
`stmt` needs.
label:
String which summarizes `stmt`. For instance, if `stmt` is
"torch.nn.functional.relu(torch.add(x, 1, out=out))"
one might set label to "ReLU(x + 1)" to improve readability.
sub_label:
Provide supplemental information to disambiguate measurements
with identical stmt or label. For instance, in our example
above sub_label might be "float" or "int", so that it is easy
to differentiate:
"ReLU(x + 1): (float)"
"ReLU(x + 1): (int)"
when printing Measurements or summarizing using `Compare`.
description:
String to distinguish measurements with identical label and
sub_label. The principal use of `description` is to signal to
`Compare` the columns of data. For instance one might set it
based on the input size to create a table of the form: ::
| n=1 | n=4 | ...
------------- ...
ReLU(x + 1): (float) | ... | ... | ...
ReLU(x + 1): (int) | ... | ... | ...
using `Compare`. It is also included when printing a Measurement.
env:
This tag indicates that otherwise identical tasks were run in
different environments, and are therefore not equivalent, for
instance when A/B testing a change to a kernel. `Compare` will
treat Measurements with different `env` specification as distinct
when merging replicate runs.
num_threads:
The size of the PyTorch threadpool when executing `stmt`. Single
threaded performance is important as both a key inference workload
and a good indicator of intrinsic algorithmic efficiency, so the
default is set to one. This is in contrast to the default PyTorch
threadpool size which tries to utilize all cores.
"""
_timer_cls: Type[TimerClass] = timeit.Timer
def __init__(
self,
stmt: str = "pass",
setup: str = "pass",
timer: Callable[[], float] = timer,
globals: Optional[Dict[str, Any]] = None,
label: Optional[str] = None,
sub_label: Optional[str] = None,
description: Optional[str] = None,
env: Optional[str] = None,
num_threads: int = 1,
language: Union[Language, str] = Language.PYTHON,
):
if not isinstance(stmt, str):
raise ValueError("Currently only a `str` stmt is supported.")
# We copy `globals` to prevent mutations from leaking.
# (For instance, `eval` adds the `__builtins__` key)
self._globals = dict(globals or {})
if language in (Language.PYTHON, "py", "python"):
# Include `torch` if not specified as a convenience feature.
self._globals.setdefault("torch", torch)
self._language: Language = Language.PYTHON
elif language in (Language.CPP, "cpp", "c++"):
assert self._timer_cls is timeit.Timer, "_timer_cls has already been swapped."
self._timer_cls = CPPTimer
setup = ("" if setup == "pass" else setup)
self._language = Language.CPP
else:
raise ValueError(f"Invalid language `{language}`.")
# Convenience adjustment so that multi-line code snippets defined in
# functions do not raise IndentationError (Python) or look odd (C++). The
# leading newline removal is for the initial newline that appears when
# defining block strings. For instance:
# textwrap.dedent("""
# print("This is a stmt")
# """)
# produces '\nprint("This is a stmt")\n'.
#
# Stripping this down to 'print("This is a stmt")' doesn't change
# what gets executed, but it makes __repr__'s nicer.
stmt = textwrap.dedent(stmt)
stmt = (stmt[1:] if stmt and stmt[0] == "\n" else stmt).rstrip()
setup = textwrap.dedent(setup)
setup = (setup[1:] if setup and setup[0] == "\n" else setup).rstrip()
self._timer = self._timer_cls(
stmt=stmt,
setup=setup,
timer=timer,
globals=valgrind_timer_interface.CopyIfCallgrind.unwrap_all(self._globals),
)
self._task_spec = common.TaskSpec(
stmt=stmt,
setup=setup,
label=label,
sub_label=sub_label,
description=description,
env=env,
num_threads=num_threads,
)
def timeit(self, number: int = 1000000) -> common.Measurement:
"""Mirrors the semantics of timeit.Timer.timeit().
Execute the main statement (`stmt`) `number` times.
https://docs.python.org/3/library/timeit.html#timeit.Timer.timeit
"""
with common.set_torch_threads(self._task_spec.num_threads):
# Warmup
self._timer.timeit(number=max(int(number // 100), 1))
return common.Measurement(
number_per_run=number,
raw_times=[self._timer.timeit(number=number)],
task_spec=self._task_spec
)
def repeat(self, repeat: int = -1, number: int = -1) -> None:
raise NotImplementedError("See `Timer.blocked_autorange.`")
def autorange(self, callback: Optional[Callable[[int, float], NoReturn]] = None) -> None:
raise NotImplementedError("See `Timer.blocked_autorange.`")
def _threaded_measurement_loop(
self,
number: int,
time_hook: Callable[[], float],
stop_hook: Callable[[List[float]], bool],
min_run_time: float,
max_run_time: Optional[float] = None,
callback: Optional[Callable[[int, float], NoReturn]] = None
) -> List[float]:
total_time = 0.0
can_stop = False
times: List[float] = []
with common.set_torch_threads(self._task_spec.num_threads):
while (total_time < min_run_time) or (not can_stop):
time_spent = time_hook()
times.append(time_spent)
total_time += time_spent
if callback:
callback(number, time_spent)
can_stop = stop_hook(times)
if max_run_time and total_time > max_run_time:
break
return times
def _estimate_block_size(self, min_run_time: float) -> int:
with common.set_torch_threads(self._task_spec.num_threads):
# Estimate the block size needed for measurement to be negligible
# compared to the inner loop. This also serves as a warmup.
overhead = np.median([self._timer.timeit(0) for _ in range(5)])
number = 1
while True:
time_taken = self._timer.timeit(number)
relative_overhead = overhead / time_taken
if relative_overhead <= 1e-4 and time_taken >= min_run_time / 1000:
break
if time_taken > min_run_time:
break
number *= 10
return number
def adaptive_autorange(
self,
threshold: float = 0.1,
*,
min_run_time: float = 0.01,
max_run_time: float = 10.0,
callback: Optional[Callable[[int, float], NoReturn]] = None,
) -> common.Measurement:
number = self._estimate_block_size(min_run_time=0.05)
def time_hook() -> float:
return self._timer.timeit(number)
def stop_hook(times: List[float]) -> bool:
if len(times) > 3:
return common.Measurement(
number_per_run=number,
raw_times=times,
task_spec=self._task_spec
).meets_confidence(threshold=threshold)
return False
times = self._threaded_measurement_loop(
number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback)
return common.Measurement(
number_per_run=number,
raw_times=times,
task_spec=self._task_spec
)
def blocked_autorange(
self,
callback: Optional[Callable[[int, float], NoReturn]] = None,
min_run_time: float = 0.2,
) -> common.Measurement:
"""Measure many replicates while keeping timer overhead to a minimum.
At a high level, blocked_autorange executes the following pseudo-code::
`setup`
total_time = 0
while total_time < min_run_time
start = timer()
for _ in range(block_size):
`stmt`
total_time += (timer() - start)
Note the variable `block_size` in the inner loop. The choice of block
size is important to measurement quality, and must balance two
competing objectives:
1) A small block size results in more replicates and generally
better statistics.
2) A large block size better amortizes the cost of `timer`
invocation, and results in a less biased measurement. This is
important because CUDA synchronization time is non-trivial
(order single to low double digit microseconds) and would
otherwise bias the measurement.
blocked_autorange sets block_size by running a warmup period,
increasing block size until timer overhead is less than 0.1% of
the overall computation. This value is then used for the main
measurement loop.
Returns:
A `Measurement` object that contains measured runtimes and
repetition counts, and can be used to compute statistics.
(mean, median, etc.)
"""
number = self._estimate_block_size(min_run_time)
def time_hook() -> float:
return self._timer.timeit(number)
def stop_hook(times: List[float]) -> bool:
return True
times = self._threaded_measurement_loop(
number, time_hook, stop_hook,
min_run_time=min_run_time,
callback=callback)
return common.Measurement(
number_per_run=number,
raw_times=times,
task_spec=self._task_spec
)
def collect_callgrind(
self,
number: int = 100,
collect_baseline: bool = True
) -> valgrind_timer_interface.CallgrindStats:
"""Collect instruction counts using Callgrind.
Unlike wall times, instruction counts are deterministic
(modulo non-determinism in the program itself and small amounts of
jitter from the Python interpreter.) This makes them ideal for detailed
performance analysis. This method runs `stmt` in a separate process
so that Valgrind can instrument the program. Performance is severely
degraded due to the instrumentation, however this is ameliorated by
the fact that a small number of iterations is generally sufficient to
obtain good measurements.
In order to use this method, `valgrind`, `callgrind_control`, and
`callgrind_annotate` must be installed.
Because there is a process boundary between the caller (this process)
and the `stmt` execution, `globals` cannot contain arbitrary in-memory
data structures (unlike the timing methods). Instead, globals are
restricted to builtins, `nn.Module`s, and TorchScripted functions/modules
to reduce the surprise factor from serialization and subsequent
deserialization. The `GlobalsBridge` class provides more detail on this
subject. Take particular care with nn.Modules: they rely on pickle and
you may need to add an import to `setup` for them to transfer properly.
By default, a profile for an empty statement will be collected and
cached to indicate how many instructions are from the Python loop which
drives `stmt`.
Returns:
A `CallgrindStats` object which provides instruction counts and
some basic facilities for analyzing and manipulating results.
"""
if not isinstance(self._task_spec.stmt, str):
raise ValueError("`collect_callgrind` currently only supports string `stmt`")
# Check that the statement is valid. It doesn't guarantee success, but it's much
# simpler and quicker to raise an exception for a faulty `stmt` or `setup` in
# the parent process rather than the valgrind subprocess.
self._timer.timeit(1)
is_python = (self._language == Language.PYTHON)
assert is_python or not self._globals
return valgrind_timer_interface.wrapper_singleton().collect_callgrind(
task_spec=self._task_spec,
globals=self._globals,
number=number,
collect_baseline=collect_baseline and is_python,
is_python=is_python)
| [
"torch.cuda.synchronize",
"torch.utils.benchmark.utils.common.TaskSpec",
"torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.wrapper_singleton",
"torch.utils.benchmark.utils.common.set_torch_threads",
"torch.cuda.is_available",
"torch.utils.benchmark.utils.common.Measurement",
"torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.CopyIfCallgrind.unwrap_all",
"torch.utils.benchmark.utils.cpp_jit.compile_timeit_template"
] | 1.8.1 | GOOGLE-M/SGC | 78ad8d02b80808302e38559e2d0f430f66a809bd |
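For context, a hedged sketch of how this Timer is typically driven from user code; the tensor size, labels, and printed statistics are illustrative assumptions, not taken from the row above.

```python
import torch
from torch.utils.benchmark import Timer

x = torch.randn(1024, 1024)
t = Timer(
    stmt="torch.relu(x + 1)",   # `torch` is injected into globals by default
    globals={"x": x},
    label="ReLU(x + 1)",
    sub_label="float",
    description="n=1024",
    num_threads=1,
)
m = t.blocked_autorange(min_run_time=0.2)  # Measurement over many replicates
print(m.median, m.mean)
```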
1.1 |
from .single_stage import SingleStageDetector
from ..registry import DETECTORS
from mmdet.core import bbox2result
import torch.nn as nn
import torch
from .. import builder
import numpy as np
import cv2
from mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler
@DETECTORS.register_module
class CSP(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
refine_roi_extractor=None,
refine_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
detached=True,
return_feature_maps=False):
super(CSP, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
if refine_head is not None:
self.refine_roi_extractor = builder.build_roi_extractor(
refine_roi_extractor)
self.refine_head = builder.build_head(refine_head)
self.return_feature_maps = return_feature_maps
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.detached = detached
def show_input_debug(self, img, classification_maps, scale_maps, offset_maps):
img_numpy = img.cpu().numpy().copy()[0]
# img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]
img_numpy = np.transpose(img_numpy, [1, 2, 0]) + [102.9801, 115.9465, 122.7717]
img_numpy = img_numpy[:, :, ::-1]
img_numpy = img_numpy.astype(np.uint8)
strides = [8, 16, 32, 64, 128]
img_nows = []
for i, stride in enumerate(strides):
img_now = img_numpy.copy()
# cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][2]
cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][:80]
scale_numpy = scale_maps[0][i].cpu().numpy().copy()[0][0] * stride
offset_numpy = offset_maps[0][i].cpu().numpy().copy()[0][:2]
cs, ys, xs = cls_numpy.nonzero()
print(len(ys))
for c, x, y in zip(cs, xs, ys):
cv2.imshow(str(c), classification_maps[0][i].cpu().numpy().copy()[0][80+c])
realx = x
realy = y
height = scale_numpy[y, x]
realy = realy + 0.5 + offset_numpy[0][y, x]
realx = realx + 0.5 + offset_numpy[1][y, x]
realy = realy * stride
realx = realx * stride
top_y = int(realy - height/2)
top_x = int(realx)
down_y = int(realy + height/2)
down_x = int(realx)
top_left = (int(top_x - height * 0.1), int(top_y))
down_right = (int(down_x + height * 0.1), down_y)
cv2.rectangle(img_now, top_left, down_right, (255, 255, 5*int(c)), 2)
img_nows.append(img_now)
cv2.imshow(str(i) +'img', img_now)
cv2.waitKey(0)
def show_input_debug_caltech(self, img, classification_maps, scale_maps, offset_maps):
for j in range(img.shape[0]):
img_numpy = img.cpu().numpy().copy()[j]
img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]
img_numpy = img_numpy[:, :, ::-1]
img_numpy = img_numpy.astype(np.uint8)
strides = [4]
img_nows = []
for i, stride in enumerate(strides):
img_now = img_numpy.copy()
cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]
ignore_numpy = classification_maps[j][i].cpu().numpy().copy()[0][1]
cv2.imshow('ignore', ignore_numpy)
scale_numpy = scale_maps[j][i].cpu().numpy().copy()[0][0] * stride
offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]
ys, xs = cls_numpy.nonzero()
print(len(ys))
for x, y in zip(xs, ys):
# cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][c])
realx = x
realy = y
height = scale_numpy[y, x]
realy = realy + 0.5 + offset_numpy[0][y, x]
realx = realx + 0.5 + offset_numpy[1][y, x]
realy = realy * stride
realx = realx * stride
top_y = int(realy - height/2)
top_x = int(realx)
down_y = int(realy + height/2)
down_x = int(realx)
top_left = (int(top_x - height * 0.1), int(top_y))
down_right = (int(down_x + height * 0.1), down_y)
cv2.rectangle(img_now, top_left, down_right, (255, 255, 125), 2)
img_nows.append(img_now)
cv2.imshow(str(i) +'img', img_now)
cv2.waitKey(0)
def show_input_debug_head(self, img, classification_maps, scale_maps, offset_maps):
for j in range(img.shape[0]):
img_numpy = img.cpu().numpy().copy()[j]
img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]
img_numpy = img_numpy[:, :, ::-1]
img_numpy = img_numpy.astype(np.uint8)
strides = [4]
img_nows = []
for i, stride in enumerate(strides):
img_now = img_numpy.copy()
cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]
ignore_numpy = classification_maps[j][i].cpu().numpy().copy()[0][1]
cv2.imshow('ignore', ignore_numpy)
scale_numpy = scale_maps[j][i].exp().cpu().numpy().copy()[0][0] * stride
offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]
ys, xs = cls_numpy.nonzero()
for x, y in zip(xs, ys):
# cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][c])
realx = x
realy = y
height = scale_numpy[y, x]
realy = realy + 0.5 + offset_numpy[0][y, x]
realx = realx + 0.5 + offset_numpy[1][y, x]
realy = realy * stride
realx = realx * stride
top_y = int(realy)
top_x = int(realx)
down_y = int(realy + height)
down_x = int(realx)
top_left = (int(top_x - height * 0.41/2), int(top_y))
down_right = (int(down_x + height * 0.41/2), down_y)
cv2.rectangle(img_now, top_left, down_right, (255, 255, 125), 2)
img_nows.append(img_now)
cv2.imshow(str(i) +'img', img_now)
cv2.waitKey(0)
def show_mot_input_debug(self, img, classification_maps, scale_maps, offset_maps):
for j in range(img.shape[0]):
img_numpy = img.cpu().numpy().copy()[j]
img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]
# img_numpy = np.transpose(img_numpy, [1, 2, 0]) + [102.9801, 115.9465, 122.7717]
img_numpy = img_numpy[:, :, ::-1]
img_numpy = img_numpy.astype(np.uint8)
strides = [4]
img_nows = []
for i, stride in enumerate(strides):
img_now = img_numpy.copy()
# cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][2]
cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]
instance_numpy = classification_maps[j][i].cpu().numpy().copy()[0][3]
scale_numpy = scale_maps[j][i].cpu().numpy().copy()[0][0] * stride
offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]
ys, xs = cls_numpy.nonzero()
for x, y in zip(xs, ys):
c=0
cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][2])
realx = x
realy = y
height = scale_numpy[y, x]
realy = realy + 0.5 + offset_numpy[0][y, x]
realx = realx + 0.5 + offset_numpy[1][y, x]
realy = realy * stride
realx = realx * stride
top_y = int(realy - height/2)
top_x = int(realx)
down_y = int(realy + height/2)
down_x = int(realx)
top_left = (int(top_x - height * 0.1), int(top_y))
down_right = (int(down_x + height * 0.1), down_y)
cv2.rectangle(img_now, top_left, down_right, (255, 255, 5*int(c)), 2)
instance = instance_numpy[y, x]
cv2.putText(img_now, str(instance), top_left, cv2.FONT_HERSHEY_COMPLEX, 1, 255)
img_nows.append(img_now)
cv2.imshow(str(i) +'img', img_now)
cv2.waitKey(0)
@property
def refine(self):
return hasattr(self, 'refine_head') and self.refine_head is not None
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
classification_maps=None,
scale_maps=None,
offset_maps=None):
# for tracking data, the batch is produced by the dataset instead of the data loader
if type(img) == list:
img=img[0]
img_metas=img_metas[0]
gt_bboxes=gt_bboxes[0]
gt_labels=gt_labels[0]
gt_bboxes_ignore = gt_bboxes_ignore[0]
classification_maps = classification_maps[0]
scale_maps = scale_maps[0]
offset_maps = offset_maps[0]
losses = dict()
x = self.extract_feat(img)
# self.show_input_debug(img, classification_maps, scale_maps, offset_maps)
# self.show_input_debug_caltech(img, classification_maps, scale_maps, offset_maps)
# self.show_mot_input_debug(img, classification_maps, scale_maps, offset_maps)
# self.show_input_debug_head(img, classification_maps, scale_maps, offset_maps)
outs = self.bbox_head(x)
loss_inputs = outs + (gt_bboxes, gt_labels, classification_maps, scale_maps, offset_maps, img_metas, self.train_cfg.csp_head if self.refine else self.train_cfg)
losses_bbox = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(losses_bbox)
if self.refine:
if self.detached:
x = tuple([i.detach() for i in x])
bbox_inputs = outs + (img_metas, self.train_cfg.csp_head, False)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, no_strides=False) # no_strides to not upscale yet
bbox_list = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)[0]
for det_bboxes, det_labels in bbox_list
]
bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
bbox_sampler = build_sampler(
self.train_cfg.rcnn.sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
if bbox_list[i].shape[0] == 0 or gt_bboxes[i].shape[0] == 0:
continue
bbox = torch.tensor(bbox_list[i]).float().cuda()
assign_result = bbox_assigner.assign(
bbox, gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
bbox,
gt_bboxes[i],
gt_labels[i])
sampling_results.append(sampling_result)
samp_list = [res.bboxes for res in sampling_results]
if len(samp_list) == 0:
losses.update(dict(loss_refine_cls=torch.tensor(0).float().cuda(), acc=torch.tensor(0).float().cuda()))
return losses
rois = bbox2roi(samp_list).float()
if self.refine_head.loss_opinion is not None:
pred_scores = torch.cat([torch.tensor(bbox[:, 4]).float().cuda() for bbox in bbox_list], dim=0)
pred_rois = bbox2roi([torch.tensor(bbox).float().cuda() for bbox in bbox_list])
pred_feats = self.refine_roi_extractor(
x, pred_rois)
pred_scores_refine = self.refine_head(pred_feats)
loss_opinion = self.refine_head.compute_opinion_loss(pred_scores, pred_scores_refine)
losses.update(loss_opinion)
bbox_feats = self.refine_roi_extractor(
x, rois)
cls_score = self.refine_head(bbox_feats)
bbox_targets = self.refine_head.get_target(
sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn)
loss_refine = self.refine_head.loss(cls_score,
*bbox_targets[:2])
losses.update(dict(loss_refine_cls=loss_refine["loss_cls"], distL1=loss_refine["dist"]))
return losses
def simple_test_accuracy(self, img, img_meta):
gts = img_meta[0]["gts"]
x = self.extract_feat(img)
if self.detached:
x = (x[0].detach(),)
rois = bbox2roi(gts)
if rois.shape[0] == 0:
return 0, 0
roi_feats = self.refine_roi_extractor(
x, rois)
cls_score = self.refine_head.get_scores(roi_feats)
return (cls_score > 0.5).float().sum(), rois.size(0)
def simple_test(self, img, img_meta, rescale=False, return_id=False):
x = self.extract_feat(img)
outs = self.bbox_head(x)
bbox_inputs = outs + (img_meta, self.test_cfg.csp_head if self.refine else self.test_cfg, False) # TODO: Handle rescaling
if self.return_feature_maps:
return self.bbox_head.get_bboxes_features(*bbox_inputs)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, no_strides=False)
im_scale = img_meta[0]["scale_factor"]
if "id" in img_meta[0]:
img_id = img_meta[0]["id"]
else:
img_id = 0
if self.refine:
if self.detached:
x = (x[0].detach(),)
bbox_list = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)[0]
for det_bboxes, det_labels in bbox_list
]
refine_cfg = self.test_cfg.get('rcnn', None)
bbox_list = [torch.tensor(bbox).float().cuda() for bbox in bbox_list]
rois = bbox2roi(bbox_list)
bbox_list = [bbox/im_scale for bbox in bbox_list]
if rois.shape[0] == 0:
cls_score = None
else:
roi_feats = self.refine_roi_extractor(
x, rois)
cls_score = self.refine_head.get_scores(roi_feats)
res_buffer = []
if cls_score is not None:
if refine_cfg is not None:
res_buffer = self.refine_head.suppress_boxes(rois, cls_score, img_meta, cfg=refine_cfg)
else:
res_buffer = self.refine_head.combine_scores(bbox_list, cls_score)
if return_id:
return res_buffer, img_id
return res_buffer
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
if return_id:
return bbox_results[0], img_id
return bbox_results[0]
def foward_features(self, features):
bbox_list = self.bbox_head.get_bboxes(*features)
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results[0]
| [
"torch.tensor"
] | 1.1 | mohammedshariqnawaz/Pedestron | 9785feb94f00e07ae24a662525b4678f12d0fdc8 |
1.0 | import torch
from torch import nn
from torch.distributions import MultivariateNormal
class Normal(nn.Module):
def __init__(self, num_vars=100):
super(Normal, self).__init__()
self.num_vars = num_vars
self.means = nn.Parameter(torch.zeros(num_vars))
self.std = nn.Parameter(torch.eye(num_vars))
def log_prob(self, x):
distr = MultivariateNormal(self.means, self.std)
return distr.log_prob(x)
def sample(self, num_samples):
distr = MultivariateNormal(self.means, self.std)
return distr.sample_n(num_samples)
| [
"torch.zeros",
"torch.distributions.MultivariateNormal",
"torch.eye"
] | 1.0.1 | insilicomedicine/TRIP | 5e7b9da298aa47a71c71e1144ff1d8e538dbccaa |
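A short usage sketch for the prior above; it assumes the `Normal` class from this row is importable, and the batch size of 16 is arbitrary.

```python
# Assumes `Normal` from the module above is in scope (e.g. imported from the package)
prior = Normal(num_vars=100)
z = prior.sample(16)        # (16, 100) draws from N(means, std)
logp = prior.log_prob(z)    # (16,) log-densities under the same Gaussian
print(z.shape, logp.shape)
```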
1.0 | import torch
import torch.nn as nn
from torch import autograd
import torch.optim as optim
from ...utils import TrainStats
class WGAN(nn.Module):
def __init__(self, gen, discr, prior, n_critic=5, gamma=1, gp=True,
device='cpu'):
super(WGAN, self).__init__()
self.gen = gen
self.discr = discr
self.prior = prior
self.gamma = gamma
self.n_critic = n_critic
self.gp = gp
self.device = device
def get_losses(self, x, compute_reinforce=False):
# get generator samples
sampled_latents = self.prior.sample(x.shape[0])
sampled_latents = sampled_latents.detach()
sampled_images = self.gen(sampled_latents)
# get discriminator outputs
real_discr = self.discr(x)
fake_discr = self.discr(sampled_images)
# compute gradient penalties
if self.gp:
alphas = torch.rand(x.shape[0], 1, 1, 1).repeat(1, x.shape[1],
x.shape[2],
x.shape[3])
alphas = alphas.to(self.device)
int_points = alphas * sampled_images + (1 - alphas) * x
int_points_discr = self.discr(int_points)
gradients = autograd.grad(outputs=int_points_discr, inputs=int_points,
grad_outputs=torch.ones(
int_points_discr.size()).to(self.device),
create_graph=True, retain_graph=True,
only_inputs=True)[0]
grad_norm = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
# compute reinforce loss
if compute_reinforce:
rews = (fake_discr - fake_discr.mean()).detach()
rews = rews / rews.std()
lp_loss = -(rews * self.prior.log_prob(sampled_latents)).mean()
else:
lp_loss = torch.zeros(1).mean()
# compute losses
gen_loss = -fake_discr.mean()
discr_loss = -(
real_discr.mean() - fake_discr.mean())
if self.gp:
discr_loss = discr_loss + self.gamma * grad_norm
return gen_loss, \
discr_loss, \
lp_loss, \
{
'gen_loss': gen_loss.detach().cpu().numpy(),
'discr_loss': discr_loss.detach().cpu().numpy(),
'lp_loss': lp_loss.detach().cpu().numpy(),
'grad_norm': grad_norm.detach().cpu().numpy()
}
def make_training(self, train_loader, global_stats=None, num_iterations=20000, verbose_step=50,
train_lp=True, lr=1e-4, lp_lr=1e-4):
gen_optimizer = optim.Adam(self.gen.parameters(), lr=lr, betas=(0.5, .9))
discr_optimizer = optim.Adam(self.discr.parameters(), lr=lr,
betas=(0.5, .9))
lp_optimizer = optim.Adam(self.prior.parameters(), lr=lp_lr)
local_stats = TrainStats()
cur_iteration = 0
epoch_i = 0
while cur_iteration < num_iterations:
i = 0
print("Epoch", epoch_i, ":")
for x_batch, _ in train_loader:
x_batch = x_batch.to(self.device)
print("!", end='')
i += 1
gen_loss, discr_loss, lp_loss, cur_stats = self.get_losses(
x_batch, (i % self.n_critic == 0) and train_lp)
local_stats.update(cur_stats)
if global_stats is not None:
global_stats.update(cur_stats)
if i % self.n_critic == 0:
gen_optimizer.zero_grad()
gen_loss.backward()
gen_optimizer.step()
if train_lp:
lp_optimizer.zero_grad()
lp_loss.backward()
lp_optimizer.step()
self.prior.stabilize()
else:
discr_optimizer.zero_grad()
discr_loss.backward()
discr_optimizer.step()
cur_iteration += 1
if cur_iteration >= num_iterations:
break
if i % verbose_step == 0:
local_stats.print()
local_stats.reset()
i = 0
epoch_i += 1
if i > 0:
local_stats.print()
local_stats.reset()
return global_stats
def sample(self, num_samples):
z = self.prior.sample(num_samples)
samples = self.gen(z)
return samples.detach().cpu().numpy() | [
"torch.zeros",
"torch.rand"
] | 1.0.1 | insilicomedicine/TRIP | 5e7b9da298aa47a71c71e1144ff1d8e538dbccaa |
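A self-contained sketch of the WGAN-GP gradient-penalty term computed in `get_losses` above, using a toy discriminator; the shapes and the flattening before the norm are illustrative assumptions, not a line-for-line copy of the row.

```python
import torch
from torch import autograd, nn

discr = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 1))
real = torch.rand(4, 3, 8, 8)
fake = torch.rand(4, 3, 8, 8)

# Interpolate between real and fake samples
alpha = torch.rand(4, 1, 1, 1).expand_as(real)
interp = (alpha * fake + (1 - alpha) * real).requires_grad_(True)
out = discr(interp)

# Gradient of the critic output w.r.t. the interpolated points
grads = autograd.grad(outputs=out, inputs=interp,
                      grad_outputs=torch.ones_like(out),
                      create_graph=True, retain_graph=True,
                      only_inputs=True)[0]
gp = ((grads.view(grads.size(0), -1).norm(2, dim=1) - 1) ** 2).mean()
print(gp.item())
```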
1.8 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as td
class Flow(nn.Module):
"""
Building both normalizing flows and neural flows.
Example:
>>> import stribor as st
>>> torch.manual_seed(123)
>>> dim = 2
>>> flow = st.Flow(st.UnitNormal(dim), [st.Affine(dim)])
>>> x = torch.rand(1, dim)
>>> y, ljd = flow(x)
>>> y_inv, ljd_inv = flow.inverse(y)
Args:
base_dist (Type[torch.distributions]): Base distribution
transforms (List[st.flows]): List of invertible transformations
"""
def __init__(self, base_dist=None, transforms=[]):
super().__init__()
self.base_dist = base_dist
self.transforms = nn.ModuleList(transforms)
def forward(self, x, latent=None, mask=None, t=None, reverse=False, **kwargs):
"""
Args:
x (tensor): Input sampled from base density with shape (..., dim)
latent (tensor, optional): Conditional vector with shape (..., latent_dim)
Default: None
mask (tensor): Masking tensor with shape (..., 1)
Default: None
t (tensor, optional): Flow time end point. Default: None
reverse (bool, optional): Whether to perform an inverse. Default: False
Returns:
y (tensor): Output that follows target density (..., dim)
log_jac_diag (tensor): Log-Jacobian diagonal (..., dim)
"""
transforms = self.transforms[::-1] if reverse else self.transforms
_mask = 1 if mask is None else mask
log_jac_diag = torch.zeros_like(x).to(x)
for f in transforms:
if reverse:
x, ld = f.inverse(x * _mask, latent=latent, mask=mask, t=t, **kwargs)
else:
x, ld = f.forward(x * _mask, latent=latent, mask=mask, t=t, **kwargs)
log_jac_diag += ld * _mask
return x, log_jac_diag
def inverse(self, y, latent=None, mask=None, t=None, **kwargs):
""" Inverse of forward function with the same arguments. """
return self.forward(y, latent=latent, mask=mask, t=t, reverse=True, **kwargs)
def log_prob(self, x, **kwargs):
"""
Calculates log-probability of a sample.
Args:
x (tensor): Input with shape (..., dim)
Returns:
log_prob (tensor): Log-probability of the input with shape (..., 1)
"""
if self.base_dist is None:
raise ValueError('Please define `base_dist` if you need log-probability')
x, log_jac_diag = self.inverse(x, **kwargs)
log_prob = self.base_dist.log_prob(x) + log_jac_diag.sum(-1)
return log_prob.unsqueeze(-1)
def sample(self, num_samples, latent=None, mask=None, **kwargs):
"""
Transforms samples from the base to the target distribution.
Uses reparametrization trick.
Args:
num_samples (tuple or int): Shape of samples
latent (tensor): Latent conditioning vector with shape (..., latent_dim)
Returns:
x (tensor): Samples from target distribution with shape (*num_samples, dim)
"""
if self.base_dist is None:
raise ValueError('Please define `base_dist` if you need sampling')
if isinstance(num_samples, int):
num_samples = (num_samples,)
x = self.base_dist.rsample(num_samples)
x, log_jac_diag = self.forward(x, **kwargs)
return x
| [
"torch.zeros_like",
"torch.nn.ModuleList"
] | 1.8.0 | mbilos/stribor | 76082c255653d6bd8d506519223183e5d8395578 |
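Extending the docstring example above, a quick sketch of `log_prob` and `sample`; it assumes the same `stribor` API shown in that docstring.

```python
import torch
import stribor as st

dim = 2
flow = st.Flow(st.UnitNormal(dim), [st.Affine(dim)])
x = torch.rand(8, dim)
logp = flow.log_prob(x)    # (8, 1) log-density under the flow
y = flow.sample(8)         # (8, dim) base samples pushed through the transforms
print(logp.shape, y.shape)
```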
1.8 | import torch
import torch.nn as nn
import torch.nn.functional as F
def diff(x, dim=-1):
"""
Inverse of x.cumsum(dim=dim).
Compute differences between subsequent elements of the tensor.
Only works on dims -1 and -2.
Args:
x (tensor): Input of arbitrary shape
Returns:
diff (tensor): Result with the same shape as x
"""
if dim == 1:
if x.dim() == 2:
dim = -1
elif x.dim() == 3:
dim = -2
else:
raise ValueError('If dim=1, tensor must have 2 or 3 dimensions')
if dim == 2:
if x.dim() == 3:
dim = -1
elif x.dim() == 4:
dim = -2
else:
raise ValueError('If dim=2, tensor should have 3 or 4 dimensions')
if dim == -1:
return x - F.pad(x, (1, 0))[..., :-1]
elif dim == -2:
return x - F.pad(x, (0, 0, 1, 0))[..., :-1, :]
else:
raise ValueError("dim must be equal to -1 or -2")
class Cumsum(nn.Module):
"""
Compute cumulative sum along the specified dimension of the tensor.
Example:
>>> f = stribor.Cumsum(-1)
>>> f(torch.ones(1, 4))
(tensor([[1., 2., 3., 4.]]), tensor([[0., 0., 0., 0.]]))
Args:
dim (int): Tensor dimension over which to perform the summation. Options: -1 or -2.
"""
def __init__(self, dim):
super().__init__()
assert dim in [-1, -2], '`dim` must be either `-1` or `-2`'
self.dim = dim
def forward(self, x, **kwargs):
y = x.cumsum(self.dim)
return y, torch.zeros_like(y)
def inverse(self, y, **kwargs):
x = diff(y, self.dim)
return x, torch.zeros_like(x)
class Diff(nn.Module):
"""
Inverse of Cumsum transformation.
Args:
dim (int): Tensor dimension over which to perform the diff. Options: -1 or -2.
"""
def __init__(self, dim):
super().__init__()
self.base_flow = Cumsum(dim)
def forward(self, x, **kwargs):
return self.base_flow.inverse(x, **kwargs)
def inverse(self, x, **kwargs):
return self.base_flow.forward(x, **kwargs)
class CumsumColumn(nn.Module):
"""
Cumulative sum along the specific column in (..., M, N) matrix.
Example:
>>> f = stribor.CumsumColumn(1)
>>> f(torch.ones(3, 3))[0]
tensor([[1., 1., 1.],
[1., 2., 1.],
[1., 3., 1.]])
Args:
column (int): Column in the (batched) matrix (..., M, N) over which to
perform the summation
"""
def __init__(self, column):
super().__init__()
self.column = column
def forward(self, x, **kwargs):
y = x.clone()
y[..., self.column] = y[..., self.column].cumsum(-1)
return y, torch.zeros_like(y)
def inverse(self, y, **kwargs):
x = y.clone()
x[..., self.column] = diff(x[..., self.column], -1)
return x, torch.zeros_like(x)
class DiffColumn(nn.Module):
def __init__(self, column):
super().__init__()
self.base_flow = CumsumColumn(column)
def forward(self, x, **kwargs):
return self.base_flow.inverse(x, **kwargs)
def inverse(self, x, **kwargs):
return self.base_flow.forward(x, **kwargs)
| [
"torch.zeros_like",
"torch.nn.functional.pad"
] | 1.8.0 | mbilos/stribor | 76082c255653d6bd8d506519223183e5d8395578 |
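A tiny consistency check for the `Cumsum`/`diff` pair above; a sketch that assumes the classes and functions in this row are importable as written.

```python
import torch

f = Cumsum(-1)
x = torch.randn(2, 5)
y, ljd = f(x)                    # cumulative sum; log-Jacobian diagonal is zero
x_rec, _ = f.inverse(y)
print(torch.allclose(x, x_rec))  # True: diff undoes cumsum along dim=-1
```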
3 | import sys
import math
import os
import torch
import torchvision
import numpy as np
from pkg_resources import resource_stream
def interpolate1d(x, values, tangents):
'''
Returns:
Returns the interpolated or extrapolated values for each query point,
depending on whether or not the query lies within the span of the spline.
'''
assert torch.is_tensor(x)
assert torch.is_tensor(values)
assert torch.is_tensor(tangents)
float_dtype = x.dtype
assert values.dtype == float_dtype
assert tangents.dtype == float_dtype
assert len(values.shape) == 1
assert len(tangents.shape) == 1
assert values.shape[0] == tangents.shape[0]
x_lo = torch.floor(torch.clamp(x, torch.as_tensor(0),
values.shape[0] - 2)).type(torch.int64)
x_hi = x_lo + 1
# Compute the relative distance between each `x` and the knot below it.
t = x - x_lo.type(float_dtype)
# Compute the cubic hermite expansion of `t`.
t_sq = t**2
t_cu = t * t_sq
h01 = -2. * t_cu + 3. * t_sq
h00 = 1. - h01
h11 = t_cu - t_sq
h10 = h11 - t_sq + t
# Linearly extrapolate above and below the extents of the spline for all
# values.
value_before = tangents[0] * t + values[0]
value_after = tangents[-1] * (t - 1.) + values[-1]
# Cubically interpolate between the knots below and above each query point.
neighbor_values_lo = values[x_lo]
neighbor_values_hi = values[x_hi]
neighbor_tangents_lo = tangents[x_lo]
neighbor_tangents_hi = tangents[x_hi]
value_mid = (
neighbor_values_lo * h00 + neighbor_values_hi * h01 +
neighbor_tangents_lo * h10 + neighbor_tangents_hi * h11)
return torch.where(t < 0., value_before,
torch.where(t > 1., value_after, value_mid))
def log_safe(x):
x = torch.as_tensor(x)
return torch.log(torch.min(x, torch.tensor(33e37).to(x)))
def load_spline_params():
dirname = os.path.dirname(__file__)
with open(os.path.join(dirname, '../misc/partition_spline.npz'), "rb") as spline_file:
with np.load(spline_file, allow_pickle=False) as f:
spline_x_scale = torch.tensor(f['x_scale'])
spline_values = torch.tensor(f['values'])
spline_tangents = torch.tensor(f['tangents'])
return spline_x_scale, spline_values, spline_tangents
def get_partition_init(shape):
shape = torch.as_tensor(shape)
base1 = (2.25 * shape - 4.5) / (torch.abs(shape - 2) + 0.25) + shape + 2
base2 = 5. / 18. * log_safe(4 * shape - 15) + 8
return torch.where(shape < 4, base1, base2)
def get_partition(shape):
shape = torch.as_tensor(shape)
assert (shape >= 0).all()
init = get_partition_init(shape)
x_scale, values, tangents = load_spline_params()
return interpolate1d(init * x_scale.to(init), values.to(init), tangents.to(init))
def general_adaptive_loss(x, shape, bowl=1.):
input_shape = x.shape
shape = torch.as_tensor(shape).to(x.device)
bowl = torch.as_tensor(bowl).to(x.device)
b = x.size(0)
x = x.view(b, -1)
if len(shape.shape) == 0:
shape = shape.unsqueeze(dim=0).expand([b, ]).unsqueeze(dim=1)
else:
shape = shape.view(b, -1)
if len(bowl.shape) == 0:
bowl = bowl.unsqueeze(dim=0).expand([b, ]).unsqueeze(dim=1)
else:
bowl = bowl.view(b, -1)
partition = get_partition(shape)
ans = (torch.abs(shape - 2)/shape) * (torch.pow((torch.square(x/bowl) /
torch.abs(shape - 2) + 1), shape/2) - 1) + log_safe(bowl) + log_safe(partition)
return ans.view(input_shape)
| [
"torch.is_tensor",
"torch.square",
"torch.abs",
"torch.tensor",
"torch.as_tensor",
"torch.where"
] | 3 | jmendozais/SDSSDepth | 7a4d0c5affef3eda7056876ccb2365ac883c08eb |
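A toy call exercising `interpolate1d` above; the knot values and zero tangents are arbitrary assumptions, chosen only to show the expected shapes and dtypes.

```python
import torch

# Four knots of a cubic Hermite spline with flat tangents
values = torch.tensor([0.0, 1.0, 0.5, 2.0])
tangents = torch.zeros_like(values)
x = torch.tensor([0.5, 1.5, 2.5, 5.0])   # the last query is linearly extrapolated
print(interpolate1d(x, values, tangents))
```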
1.8 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import sys
import unittest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from opacus import PrivacyEngine
from opacus.distributed import DifferentiallyPrivateDistributedDataParallel as DPDDP
from torch.nn.parallel import DistributedDataParallel as DDP
PRIVACY_ALPHAS = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))
def setup_and_get_device(rank, world_size, nonce=0):
"""
Initialize the torch.distributed process group.
If you run multiple groups in parallel or if you have zombie processes, you can add a nonce to avoid errors.
"""
device = 0
if sys.platform == "win32":
# Distributed package only covers collective communications with Gloo
# backend and FileStore on Windows platform. Set init_method parameter
# in init_process_group to a local file.
# Example init_method="file:///f:/libtmp/some_file"
init_method = "file:///{your local file path}"
# initialize the process group
dist.init_process_group(
"gloo", init_method=init_method, rank=rank, world_size=world_size
)
device = rank
elif os.environ.get("SLURM_NTASKS") is not None:
# Running on a Slurm cluster
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(7440 + nonce)
local_rank = int(os.environ.get("SLURM_LOCALID"))
dist.init_process_group(backend="gloo", rank=rank, world_size=world_size)
# The device is the local rank (if you have 2 nodes with 8 GPUs each, you will have two "cuda:0" devices)
device = local_rank
else:
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
os.environ["RANK"] = str(rank)
os.environ["WORLD_SIZE"] = str(world_size)
dist.init_process_group(
init_method="env://",
backend="nccl",
)
# Single node experiment
device = rank
return device
def cleanup():
dist.destroy_process_group()
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.net1 = nn.Linear(10, 10)
self.relu = nn.ReLU()
self.net2 = nn.Linear(10, 5)
def forward(self, x):
return self.net2(self.relu(self.net1(x)))
def demo_basic(rank, world_size, weight, dp, noise_multiplier=0, max_grad_norm=1e8):
# We don't want the 2 GPUs to work on the same examples/labels in parallel
torch.manual_seed(rank)
batch_size = 32
withdp = "with" + ("out " if not dp else "")
print(f"Running basic DDP {withdp} differential privacy example on rank {rank}.")
device = setup_and_get_device(rank, world_size)
# create model and move it to GPU with id rank
model = ToyModel().to(device)
print(f"Initial weight: {model.net1.weight.data}")
# Freeze all the parameters except one, to ensure that the noise is the same
# (the DDP hook does not browse the layers in the same order as the naive implementation)
model.net1.bias.requires_grad = False
model.net2.bias.requires_grad = False
model.net2.weight.requires_grad = False
if dp:
ddp_model = DPDDP(model)
engine = PrivacyEngine(
ddp_model,
batch_size=batch_size,
sample_size=10 * batch_size,
alphas=PRIVACY_ALPHAS,
noise_multiplier=noise_multiplier,
max_grad_norm=[max_grad_norm],
)
engine.random_number_generator = engine._set_seed(0)
else:
ddp_model = DDP(model, device_ids=[device])
loss_fn = nn.MSELoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=1)
if dp:
engine.attach(optimizer)
optimizer.zero_grad()
labels = torch.randn(batch_size, 5).to(device)
outputs = ddp_model(torch.randn(batch_size, 10).to(device))
loss_fn(outputs, labels).backward()
optimizer.step()
weight.copy_(model.net1.weight.data.cpu())
cleanup()
def demo_ddp_hook(rank, world_size, weight, dp, noise_multiplier, max_grad_norm):
torch.manual_seed(rank)
batch_size = 32
withdp = "with" + ("out " if not dp else "")
print(f"Running DDP hook {withdp} differential privacy example on rank {rank}.")
device = setup_and_get_device(rank, world_size, nonce=1)
# create model and move it to GPU with id rank
model = ToyModel().to(device)
model.net1.bias.requires_grad = False
model.net2.bias.requires_grad = False
model.net2.weight.requires_grad = False
ddp_model = DDP(model, device_ids=[device])
if dp:
engine = PrivacyEngine(
ddp_model,
batch_size=batch_size,
sample_size=10 * batch_size,
alphas=PRIVACY_ALPHAS,
noise_multiplier=noise_multiplier,
max_grad_norm=[max_grad_norm],
)
engine.random_number_generator = engine._set_seed(0)
loss_fn = nn.MSELoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=1)
if dp:
engine.attach(optimizer)
optimizer.zero_grad()
labels = torch.randn(batch_size, 5).to(device)
outputs = ddp_model(torch.randn(batch_size, 10).to(device))
loss_fn(outputs, labels).backward()
optimizer.step()
weight.copy_(model.net1.weight.data.cpu())
del ddp_model
cleanup()
def add_remove_ddp_hooks(
rank, world_size, remaining_hooks, dp, noise_multiplier=0, max_grad_norm=1e8
):
device = setup_and_get_device(rank, world_size, nonce=2)
model = ToyModel().to(device)
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=[device])
engine = PrivacyEngine(
ddp_model,
batch_size=1,
sample_size=10,
alphas=PRIVACY_ALPHAS,
noise_multiplier=noise_multiplier,
max_grad_norm=[max_grad_norm],
)
optimizer = optim.SGD(ddp_model.parameters(), lr=1)
engine.attach(optimizer)
remaining_hooks["attached"] = {
p: p._backward_hooks for p in engine.module.parameters() if p._backward_hooks
}
engine.detach()
remaining_hooks["detached"] = {
p: p._backward_hooks for p in engine.module.parameters() if p._backward_hooks
}
cleanup()
def debug(rank, world_size, tensor, dp, noise_multiplier=0, max_grad_norm=1e8):
local_rank = setup_and_get_device(rank, world_size)
print(f"Rank: {rank},World size: {world_size}, local_rank: {local_rank}")
tensor = tensor.to(local_rank)
print(f"dp: {dp}")
print(tensor)
cleanup()
def run_function(local_function, tensor, dp, noise_multiplier=0, max_grad_norm=1e8):
if os.environ.get("SLURM_NTASKS") is not None:
world_size = int(os.environ.get("SLURM_NTASKS"))
rank = int(os.environ.get("SLURM_PROCID"))
print(f"Running on a Slurm cluster with {world_size} tasks.")
local_function(rank, world_size, tensor, dp, noise_multiplier, max_grad_norm)
else:
world_size = torch.cuda.device_count()
print(f"Spawning multiple processes on a local machine with {world_size} GPUs")
# The rank will be passed as the first argument
mp.spawn(
local_function,
args=(
world_size,
tensor,
dp,
noise_multiplier,
max_grad_norm,
),
nprocs=world_size,
join=True,
)
return world_size
class GradientComputationTest(unittest.TestCase):
def test_connection(self):
tensor = torch.zeros(10, 10)
world_size = run_function(debug, tensor, dp=True)
self.assertTrue(
world_size >= 2, f"Need at least 2 gpus but was provided only {world_size}."
)
def test_gradient_noclip_zeronoise(self):
# Tests that gradient is the same with DP or with DDP
weight_dp, weight_nodp = torch.zeros(10, 10), torch.zeros(10, 10)
run_function(demo_basic, weight_dp, dp=True)
run_function(demo_basic, weight_nodp, dp=False)
self.assertTrue(torch.norm(weight_dp - weight_nodp) < 1e-7)
def test_ddp_hook(self):
# Tests that the DDP hook does the same thing as naive aggregation with per layer clipping
weight_ddp_naive, weight_ddp_hook = torch.zeros(10, 10), torch.zeros(10, 10)
run_function(
demo_basic,
weight_ddp_naive,
dp=True,
noise_multiplier=0.1,
max_grad_norm=1.0,
)
run_function(
demo_ddp_hook,
weight_ddp_hook,
dp=True,
noise_multiplier=0.1,
max_grad_norm=1.0,
)
self.assertTrue(
torch.norm(weight_ddp_naive - weight_ddp_hook) < 1e-7,
f"DDP naive: {weight_ddp_naive}\nDDP hook: {weight_ddp_hook}",
)
def test_add_remove_ddp_hooks(self):
remaining_hooks = {
"attached": None,
"detached": None,
}
run_function(
add_remove_ddp_hooks,
remaining_hooks,
dp=True,
noise_multiplier=0.1,
max_grad_norm=1.0,
)
assert remaining_hooks["attached"], "There are no hooks."
assert not remaining_hooks[
"detached"
], f"Some hooks remain after .remove_hooks(): {remaining_hooks}"
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.MSELoss",
"torch.distributed.destroy_process_group",
"torch.distributed.init_process_group",
"torch.norm",
"torch.multiprocessing.spawn",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.nn.ReLU",
"torch.randn"
] | 1.8 | RQuispeC/opacus | 5c83d59fc169e93667946204f7a6859827a38ace |
1.4 | # Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import argparse
import sklearn.metrics as metrics
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn.functional as F
import MinkowskiEngine as ME
from examples.pointnet import (
PointNet,
MinkowskiPointNet,
CoordinateTransformation,
ModelNet40H5,
stack_collate_fn,
minkowski_collate_fn,
)
from examples.common import seed_all
parser = argparse.ArgumentParser()
parser.add_argument("--voxel_size", type=float, default=0.05)
parser.add_argument("--max_steps", type=int, default=100000)
parser.add_argument("--val_freq", type=int, default=1000)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--lr", default=1e-1, type=float)
parser.add_argument("--weight_decay", type=float, default=1e-4)
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--stat_freq", type=int, default=100)
parser.add_argument("--weights", type=str, default="modelnet.pth")
parser.add_argument("--seed", type=int, default=777)
parser.add_argument("--translation", type=float, default=0.2)
parser.add_argument("--test_translation", type=float, default=0.0)
parser.add_argument(
"--network",
type=str,
choices=["pointnet", "minkpointnet", "minkfcnn", "minksplatfcnn"],
default="minkfcnn",
)
class MinkowskiFCNN(ME.MinkowskiNetwork):
def __init__(
self,
in_channel,
out_channel,
embedding_channel=1024,
channels=(32, 48, 64, 96, 128),
D=3,
):
ME.MinkowskiNetwork.__init__(self, D)
self.network_initialization(
in_channel,
out_channel,
channels=channels,
embedding_channel=embedding_channel,
kernel_size=3,
D=D,
)
self.weight_initialization()
def get_mlp_block(self, in_channel, out_channel):
return nn.Sequential(
ME.MinkowskiLinear(in_channel, out_channel, bias=False),
ME.MinkowskiBatchNorm(out_channel),
ME.MinkowskiLeakyReLU(),
)
def get_conv_block(self, in_channel, out_channel, kernel_size, stride):
return nn.Sequential(
ME.MinkowskiConvolution(
in_channel,
out_channel,
kernel_size=kernel_size,
stride=stride,
dimension=self.D,
),
ME.MinkowskiBatchNorm(out_channel),
ME.MinkowskiLeakyReLU(),
)
def network_initialization(
self,
in_channel,
out_channel,
channels,
embedding_channel,
kernel_size,
D=3,
):
self.mlp1 = self.get_mlp_block(in_channel, channels[0])
self.conv1 = self.get_conv_block(
channels[0],
channels[1],
kernel_size=kernel_size,
stride=1,
)
self.conv2 = self.get_conv_block(
channels[1],
channels[2],
kernel_size=kernel_size,
stride=2,
)
self.conv3 = self.get_conv_block(
channels[2],
channels[3],
kernel_size=kernel_size,
stride=2,
)
self.conv4 = self.get_conv_block(
channels[3],
channels[4],
kernel_size=kernel_size,
stride=2,
)
self.conv5 = nn.Sequential(
self.get_conv_block(
channels[1] + channels[2] + channels[3] + channels[4],
embedding_channel // 4,
kernel_size=3,
stride=2,
),
self.get_conv_block(
embedding_channel // 4,
embedding_channel // 2,
kernel_size=3,
stride=2,
),
self.get_conv_block(
embedding_channel // 2,
embedding_channel,
kernel_size=3,
stride=2,
),
)
self.pool = ME.MinkowskiMaxPooling(kernel_size=3, stride=2, dimension=D)
self.global_max_pool = ME.MinkowskiGlobalMaxPooling()
self.global_avg_pool = ME.MinkowskiGlobalAvgPooling()
self.final = nn.Sequential(
self.get_mlp_block(embedding_channel * 2, 512),
ME.MinkowskiDropout(),
self.get_mlp_block(512, 512),
ME.MinkowskiLinear(512, out_channel, bias=True),
)
# No, Dropout, last 256 linear, AVG_POOLING 92%
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiConvolution):
ME.utils.kaiming_normal_(m.kernel, mode="fan_out", nonlinearity="relu")
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def forward(self, x: ME.TensorField):
x = self.mlp1(x)
y = x.sparse()
y = self.conv1(y)
y1 = self.pool(y)
y = self.conv2(y1)
y2 = self.pool(y)
y = self.conv3(y2)
y3 = self.pool(y)
y = self.conv4(y3)
y4 = self.pool(y)
x1 = y1.slice(x)
x2 = y2.slice(x)
x3 = y3.slice(x)
x4 = y4.slice(x)
x = ME.cat(x1, x2, x3, x4)
y = self.conv5(x.sparse())
x1 = self.global_max_pool(y)
x2 = self.global_avg_pool(y)
return self.final(ME.cat(x1, x2)).F
class GlobalMaxAvgPool(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
self.global_max_pool = ME.MinkowskiGlobalMaxPooling()
self.global_avg_pool = ME.MinkowskiGlobalAvgPooling()
def forward(self, tensor):
x = self.global_max_pool(tensor)
y = self.global_avg_pool(tensor)
return ME.cat(x, y)
class MinkowskiSplatFCNN(MinkowskiFCNN):
def __init__(
self,
in_channel,
out_channel,
embedding_channel=1024,
channels=(32, 48, 64, 96, 128),
D=3,
):
MinkowskiFCNN.__init__(
self, in_channel, out_channel, embedding_channel, channels, D
)
def forward(self, x: ME.TensorField):
x = self.mlp1(x)
y = x.splat()
y = self.conv1(y)
y1 = self.pool(y)
y = self.conv2(y1)
y2 = self.pool(y)
y = self.conv3(y2)
y3 = self.pool(y)
y = self.conv4(y3)
y4 = self.pool(y)
x1 = y1.interpolate(x)
x2 = y2.interpolate(x)
x3 = y3.interpolate(x)
x4 = y4.interpolate(x)
x = ME.cat(x1, x2, x3, x4)
y = self.conv5(x.sparse())
x1 = self.global_max_pool(y)
x2 = self.global_avg_pool(y)
return self.final(ME.cat(x1, x2)).F
STR2NETWORK = dict(
pointnet=PointNet,
minkpointnet=MinkowskiPointNet,
minkfcnn=MinkowskiFCNN,
minksplatfcnn=MinkowskiSplatFCNN,
)
def create_input_batch(batch, is_minknet, device="cuda", quantization_size=0.05):
if is_minknet:
batch["coordinates"][:, 1:] = batch["coordinates"][:, 1:] / quantization_size
return ME.TensorField(
coordinates=batch["coordinates"],
features=batch["features"],
device=device,
)
else:
return batch["coordinates"].permute(0, 2, 1).to(device)
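# Illustrative note (assumption, not part of the original script): with the default
# quantization_size of 0.05, a coordinate value of 0.12 is rescaled to 0.12 / 0.05 = 2.4
# before being wrapped in ME.TensorField; the field keeps continuous coordinates and only
# maps them to integer voxel indices when .sparse() (or .splat()) is called in the network.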
class CoordinateTranslation:
def __init__(self, translation):
self.trans = translation
def __call__(self, coords):
if self.trans > 0:
coords += np.random.uniform(low=-self.trans, high=self.trans, size=[1, 3])
return coords
def make_data_loader(phase, is_minknet, config):
assert phase in ["train", "val", "test"]
is_train = phase == "train"
dataset = ModelNet40H5(
phase=phase,
transform=CoordinateTransformation(trans=config.translation)
if is_train
else CoordinateTranslation(config.test_translation),
data_root="modelnet40_ply_hdf5_2048",
)
return DataLoader(
dataset,
num_workers=config.num_workers,
shuffle=is_train,
collate_fn=minkowski_collate_fn if is_minknet else stack_collate_fn,
batch_size=config.batch_size,
)
def test(net, device, config, phase="val"):
is_minknet = isinstance(net, ME.MinkowskiNetwork)
data_loader = make_data_loader(
"test",
is_minknet,
config=config,
)
net.eval()
labels, preds = [], []
with torch.no_grad():
for batch in data_loader:
input = create_input_batch(
batch,
is_minknet,
device=device,
quantization_size=config.voxel_size,
)
logit = net(input)
pred = torch.argmax(logit, 1)
labels.append(batch["labels"].cpu().numpy())
preds.append(pred.cpu().numpy())
torch.cuda.empty_cache()
return metrics.accuracy_score(np.concatenate(labels), np.concatenate(preds))
def criterion(pred, labels, smoothing=True):
"""Calculate cross entropy loss, apply label smoothing if needed."""
labels = labels.contiguous().view(-1)
if smoothing:
eps = 0.2
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, labels.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(pred, labels, reduction="mean")
return loss
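# Worked example for the smoothing above (illustrative only): with eps = 0.2 and
# n_class = 4, a target label of 2 becomes the soft distribution
# [0.0667, 0.0667, 0.8, 0.0667], i.e. 1 - eps on the true class and
# eps / (n_class - 1) on every other class, and the loss is the cross entropy of
# log_softmax(pred) against that soft target.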
def train(net, device, config):
is_minknet = isinstance(net, ME.MinkowskiNetwork)
optimizer = optim.SGD(
net.parameters(),
lr=config.lr,
momentum=0.9,
weight_decay=config.weight_decay,
)
scheduler = optim.lr_scheduler.CosineAnnealingLR(
optimizer,
T_max=config.max_steps,
)
print(optimizer)
print(scheduler)
train_iter = iter(make_data_loader("train", is_minknet, config))
best_metric = 0
net.train()
for i in range(config.max_steps):
optimizer.zero_grad()
        try:
            data_dict = next(train_iter)
        except StopIteration:
            train_iter = iter(make_data_loader("train", is_minknet, config))
            data_dict = next(train_iter)
input = create_input_batch(
data_dict, is_minknet, device=device, quantization_size=config.voxel_size
)
logit = net(input)
loss = criterion(logit, data_dict["labels"].to(device))
loss.backward()
optimizer.step()
scheduler.step()
torch.cuda.empty_cache()
if i % config.stat_freq == 0:
print(f"Iter: {i}, Loss: {loss.item():.3e}")
if i % config.val_freq == 0 and i > 0:
torch.save(
{
"state_dict": net.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"curr_iter": i,
},
config.weights,
)
accuracy = test(net, device, config, phase="val")
if best_metric < accuracy:
best_metric = accuracy
print(f"Validation accuracy: {accuracy}. Best accuracy: {best_metric}")
net.train()
if __name__ == "__main__":
config = parser.parse_args()
seed_all(config.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("===================ModelNet40 Dataset===================")
print(f"Training with translation {config.translation}")
print(f"Evaluating with translation {config.test_translation}")
print("=============================================\n\n")
net = STR2NETWORK[config.network](
in_channel=3, out_channel=40, embedding_channel=1024
).to(device)
print("===================Network===================")
print(net)
print("=============================================\n\n")
train(net, device, config)
accuracy = test(net, device, config, phase="test")
print(f"Test accuracy: {accuracy}")
| [
"torch.nn.init.constant_",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.no_grad",
"torch.nn.Module.__init__",
"torch.nn.functional.log_softmax",
"torch.nn.functional.cross_entropy",
"torch.cuda.empty_cache",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.zeros_like",
"torch.argmax"
] | 1.4 | NNstorm/MinkowskiEngine | 443b37a58c379b2482b5d160d9e874b356b4bf2f |
1.4 | # Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import unittest
import numpy as np
import torch
from MinkowskiEngine import (
SparseTensor,
SparseTensorOperationMode,
SparseTensorQuantizationMode,
set_sparse_tensor_operation_mode,
clear_global_coordinate_manager,
is_cuda_available,
)
from MinkowskiEngine.utils import batched_coordinates, sparse_quantize, sparse_collate
from tests.python.common import data_loader, load_file
class SparseTensorTestCase(unittest.TestCase):
def test(self):
print(f"{self.__class__.__name__}: test SparseTensor")
coords, feats, labels = data_loader(nchannel=2)
input = SparseTensor(feats, coordinates=coords)
print(input)
def test_empty(self):
print(f"{self.__class__.__name__}: test_empty SparseTensor")
feats = torch.FloatTensor(0, 16)
coords = torch.IntTensor(0, 4)
input = SparseTensor(feats, coordinates=coords)
print(input)
def test_tensor_stride(self):
print(f"{self.__class__.__name__}: test_tensor_stride SparseTensor")
feats = torch.FloatTensor(4, 16)
coords = torch.IntTensor(
[[0, 4, 2, 1], [0, 4, 0, 0], [0, 4, 4, 4], [0, 4, 4, 7]]
)
print(coords)
input = SparseTensor(feats, coordinates=coords, tensor_stride=4)
self.assertEqual(input.tensor_stride, [4, 4, 4])
print(input)
def test_force_creation(self):
print(f"{self.__class__.__name__}: test_force_creation")
coords, feats, labels = data_loader(nchannel=2)
input1 = SparseTensor(feats, coordinates=coords)
input2 = SparseTensor(
feats, coordinates=coords, coordinate_manager=input1.coordinate_manager
)
print(input1.coordinate_map_key, input2.coordinate_map_key)
def test_device(self):
print(f"{self.__class__.__name__}: test_device SparseTensor")
if not is_cuda_available():
return
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
SparseTensor(feats.to(0), coords.to(0))
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T.to(0)
st = SparseTensor(feats, coords, device=feats.device)
print(st)
def test_device_unique(self):
print(f"{self.__class__.__name__}: test_device_unique SparseTensor")
if not is_cuda_available():
return
coords = torch.IntTensor(
[[0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1], [1, 2]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
SparseTensor(feats.to(0), coords.to(0))
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T.to(0)
st = SparseTensor(feats, coords, device=feats.device)
print(st)
def test_device2(self):
print(f"{self.__class__.__name__}: test_device2 SparseTensor")
if not is_cuda_available():
return
coordinates = np.random.rand(8192,3) * 200
quant_coordinates, quant_features = sparse_quantize(coordinates, coordinates)
bcoords, bfeats = sparse_collate([quant_coordinates], [quant_features])
bcoords, bfeats = bcoords.cuda(), bfeats.cuda()
print(bcoords, bfeats)
SparseTensor(bfeats, bcoords)
def test_quantization(self):
print(f"{self.__class__.__name__}: test_quantization")
coords, feats, labels = data_loader(nchannel=2)
# create duplicate coords
coords[0] = coords[1]
coords[2] = coords[3]
input = SparseTensor(feats, coordinates=coords)
self.assertTrue(len(input) == len(coords) - 2)
input = SparseTensor(
feats,
coordinates=coords,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
)
self.assertTrue(len(coords) == 16)
self.assertTrue(len(input) == 14)
# 1D
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
# 0.5, 2.5, 5.5, 7
sinput = SparseTensor(
coordinates=coords,
features=feats,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
)
self.assertTrue(len(sinput) == 4)
self.assertTrue(0.5 in sinput.features)
self.assertTrue(2.5 in sinput.features)
self.assertTrue(5.5 in sinput.features)
self.assertTrue(7 in sinput.features)
self.assertTrue(len(sinput.slice(sinput)) == len(coords))
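    # Added note on the test above: with UNWEIGHTED_AVERAGE, duplicate coordinates are
    # collapsed and their features averaged, e.g. the two batch-0 points at coordinate 1
    # with features 0 and 1 become a single point with feature 0.5, which is exactly what
    # the assertions check.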
def test_quantization_gpu(self):
print(f"{self.__class__.__name__}: test_quantization_gpu")
coords, feats, labels = data_loader(nchannel=2)
# create duplicate coords
coords[0] = coords[1]
coords[2] = coords[3]
input = SparseTensor(feats, coordinates=coords)
self.assertTrue(len(input) == len(coords) - 2)
input = SparseTensor(
feats,
coordinates=coords,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
device="cuda",
)
self.assertTrue(len(coords) == 16)
self.assertTrue(len(input) == 14)
print(input)
# 1D
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
# 0.5, 2.5, 5.5, 7
sinput = SparseTensor(
coordinates=coords,
features=feats,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
device="cuda",
)
print(sinput)
self.assertTrue(len(sinput) == 4)
self.assertTrue(0.5 in sinput.features)
self.assertTrue(2.5 in sinput.features)
self.assertTrue(5.5 in sinput.features)
self.assertTrue(7 in sinput.features)
self.assertTrue(len(sinput.slice(sinput)) == len(coords))
def test_extraction(self):
print(f"{self.__class__.__name__}: test_extraction")
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(feats, coords)
C0 = X.coordinates_at(0)
F0 = X.features_at(0)
self.assertTrue(0 in C0)
self.assertTrue(1 in C0)
self.assertTrue(2 in C0)
self.assertTrue(1.1 in F0)
self.assertTrue(2.1 in F0)
self.assertTrue(3.1 in F0)
CC0, FC0 = X.coordinates_and_features_at(0)
self.assertTrue((C0 == CC0).all())
self.assertTrue((F0 == FC0).all())
coords, feats = X.decomposed_coordinates_and_features
for c, f in zip(coords, feats):
self.assertEqual(c.numel(), f.numel())
print(c, f)
self.assertEqual(len(coords[0]), 3)
self.assertEqual(len(coords[1]), 0)
self.assertEqual(len(coords[2]), 2)
if not is_cuda_available():
return
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(feats, coords, device=0)
coords, feats = X.decomposed_coordinates_and_features
for c, f in zip(coords, feats):
self.assertEqual(c.numel(), f.numel())
print(c, f)
self.assertEqual(len(coords[0]), 3)
self.assertEqual(len(coords[1]), 0)
self.assertEqual(len(coords[2]), 2)
def test_features_at_coordinates(self):
print(f"{self.__class__.__name__}: test_features_at_coordinates")
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(features=feats, coordinates=coords)
feats = X.features_at_coordinates(
torch.FloatTensor([[0, 0], [0, 1], [0, 2], [2, 2], [0, 0], [0, 0.5]])
).flatten()
self.assertTrue(feats[0] == 1.1)
self.assertTrue(feats[3] == 5.1)
self.assertTrue(feats[4] == 1.1)
def test_decomposition(self):
print(f"{self.__class__.__name__}: test_decomposition")
coords, colors, pcd = load_file("1.ply")
colors = torch.from_numpy(colors)
for batch_size in [1, 5, 10, 20, 40]:
for voxel_size in [0.02]:
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
feats = torch.cat([colors for b in range(batch_size)], 0)
sinput = SparseTensor(feats, bcoords)
(
decomposed_coords,
decomposed_feats,
) = sinput.decomposed_coordinates_and_features
print([len(c) for c in decomposed_coords])
print([len(f) for f in decomposed_feats])
self.assertEqual(len(decomposed_coords), batch_size)
self.assertEqual(len(decomposed_feats), batch_size)
def test_decomposition_gpu(self):
print(f"{self.__class__.__name__}: test_decomposition_gpu")
if not torch.cuda.is_available():
return
coords, colors, pcd = load_file("1.ply")
colors = torch.from_numpy(colors)
for batch_size in [5, 10, 20, 40]:
for voxel_size in [0.02]:
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
feats = torch.cat([colors for b in range(batch_size)], 0)
sinput = SparseTensor(feats.to(0), bcoords.to(0))
(
decomposed_coords,
decomposed_feats,
) = sinput.decomposed_coordinates_and_features
print([len(c) for c in decomposed_coords])
print([len(f) for f in decomposed_feats])
self.assertEqual(len(decomposed_coords), batch_size)
self.assertEqual(len(decomposed_feats), batch_size)
def test_operation_mode(self):
print(f"{self.__class__.__name__}: test_operation_mode")
# Set to use the global sparse tensor coords manager by default
set_sparse_tensor_operation_mode(
SparseTensorOperationMode.SHARE_COORDINATE_MANAGER
)
coords, feats, labels = data_loader(nchannel=2)
# Create a sparse tensor on two different coordinates.
A = SparseTensor(torch.rand(feats.shape), coordinates=coords)
B = SparseTensor(
torch.rand(4, 2),
coordinates=torch.IntTensor([[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]),
)
self.assertTrue(A.coordinate_manager == B.coordinate_manager)
A.requires_grad_(True)
B.requires_grad_(True)
C = A + B
C.F.sum().backward()
self.assertTrue(torch.all(A.F.grad == 1).item())
self.assertTrue(torch.all(B.F.grad == 1).item())
C = A - B
C = A * B
C = A / B
# Inplace
A.requires_grad_(False)
D = SparseTensor(
torch.rand(feats.shape),
coordinate_map_key=A.coordinate_map_key,
coordinate_manager=A.coordinate_manager,
)
A -= D
A *= D
A /= D
clear_global_coordinate_manager()
set_sparse_tensor_operation_mode(
SparseTensorOperationMode.SEPARATE_COORDINATE_MANAGER
)
| [
"torch.rand",
"torch.IntTensor",
"torch.FloatTensor",
"torch.from_numpy",
"torch.all",
"torch.cuda.is_available"
] | 1.4 | NNstorm/MinkowskiEngine | 443b37a58c379b2482b5d160d9e874b356b4bf2f |
1.7 | # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
"""
COCO dataset which returns image_id for evaluation.
Mostly copy-paste from https://github.com/ashkamath/mdetr/blob/main/datasets/gqa.py
"""
import json
from pathlib import Path
import torch
import torchvision
from transformers import RobertaTokenizerFast
from .coco import ConvertCocoPolysToMask, ModulatedDetection, make_coco_transforms
class VQAv2Detection(ModulatedDetection):
pass
class VQAv2QuestionAnswering(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms, return_masks, return_tokens, tokenizer, ann_folder):
super(VQAv2QuestionAnswering, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks, return_tokens, tokenizer=tokenizer)
with open(ann_folder / "vqa2_answer2id.json", "r") as f:
self.answer2id = json.load(f)
with open(ann_folder / "vqa2_answer2id_by_type.json", "r") as f:
self.answer2id_by_type = json.load(f)
self.type2id = {"yes/no": 0, "number": 1, "other": 2}
def __getitem__(self, idx):
img, target = super(VQAv2QuestionAnswering, self).__getitem__(idx)
image_id = self.ids[idx]
coco_img = self.coco.loadImgs(image_id)[0]
caption = coco_img["caption"]
dataset_name = coco_img["dataset_name"]
questionId = coco_img["questionId"]
target = {"image_id": image_id, "annotations": target, "caption": caption}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
target["dataset_name"] = dataset_name
target["questionId"] = questionId
if coco_img["answer"] not in self.answer2id:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer"] = torch.as_tensor(self.answer2id[answer], dtype=torch.long)
target["answer_type"] = torch.as_tensor(self.type2id[coco_img["answer_type"]], dtype=torch.long)
        # util.misc.collate_fn requires the generic "answer" key to be set before each per-type answer key in target
if coco_img["answer"] not in self.answer2id_by_type["yes/no"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_yes/no"] = torch.as_tensor(
self.answer2id_by_type["yes/no"][answer] if coco_img["answer_type"] == "yes/no" else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["number"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_number"] = torch.as_tensor(
self.answer2id_by_type["number"][answer] if coco_img["answer_type"] == "number" else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["other"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_other"] = torch.as_tensor(
self.answer2id_by_type["other"][answer] if coco_img["answer_type"] == "other" else -100,
dtype=torch.long,
)
return img, target
def build(image_set, args):
# TODO: img or all?
img_dir = Path(args.coco_img_path)
assert img_dir.exists(), f"provided COCO img path {img_dir} does not exist"
tokenizer = RobertaTokenizerFast.from_pretrained(args.text_encoder_type)
if args.do_qa:
        # Not needed for VQAv2:
# assert args.vqa2_split_type is not None
if image_set == "train":
datasets = []
for imset in ["train", "minival"]:
ann_file = Path(args.vqa2_ann_path) / f"finetune_vqa2_{imset}.json"
datasets.append(
VQAv2QuestionAnswering(
img_dir / "train2014" if imset == "train" else img_dir / "val2014",
ann_file,
transforms=make_coco_transforms(image_set, cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
)
return torch.utils.data.ConcatDataset(datasets)
elif image_set == "val":
            # TODO: is this the correct ann_file?
ann_file = Path(args.vqa2_ann_path) / f"finetune_vqa2_minival.json"
return VQAv2QuestionAnswering(
img_dir / "val2014",
ann_file,
transforms=make_coco_transforms(image_set, cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
elif image_set in ["test", "testdev", "trainval"]:
ann_file = Path(args.vqa2_ann_path) / f"finetune_vqa2_{image_set}.json"
return VQAv2QuestionAnswering(
img_dir / "test2015",
ann_file,
transforms=make_coco_transforms("val", cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
else:
assert False, f"Unknown image set {image_set}"
| [
"torch.utils.data.ConcatDataset",
"torch.as_tensor"
] | 1.7.0 | TopCoder2K/mdetr | aedfd63f550ae36d1477484c489a2aa438d10aa3 |
1.6 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.loss_functions.TopK_loss import TopKLoss
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.utilities.nd_softmax import softmax_helper
from nnunet.utilities.tensor_utilities import sum_tensor
from torch import nn
import numpy as np
class GDL(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False, square_volumes=False):
"""
        square_volumes squares the weight term (1/V -> 1/V^2). The paper recommends square_volumes=True; this implementation defaults to False (the original authors' intuition).
"""
super(GDL, self).__init__()
self.square_volumes = square_volumes
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
shp_y = y.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if len(shp_x) != len(shp_y):
y = y.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(x.shape, y.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = y
else:
gt = y.long()
y_onehot = torch.zeros(shp_x)
if x.device.type == "cuda":
y_onehot = y_onehot.cuda(x.device.index)
y_onehot.scatter_(1, gt, 1)
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
if not self.do_bg:
x = x[:, 1:]
y_onehot = y_onehot[:, 1:]
tp, fp, fn, _ = get_tp_fp_fn_tn(x, y_onehot, axes, loss_mask, self.square)
# GDL weight computation, we use 1/V
volumes = sum_tensor(y_onehot, axes) + 1e-6 # add some eps to prevent div by zero
if self.square_volumes:
volumes = volumes ** 2
# apply weights
tp = tp / volumes
fp = fp / volumes
fn = fn / volumes
# sum over classes
if self.batch_dice:
axis = 0
else:
axis = 1
tp = tp.sum(axis, keepdim=False)
fp = fp.sum(axis, keepdim=False)
fn = fn.sum(axis, keepdim=False)
# compute dice
dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth)
dc = dc.mean()
return -dc
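# Added note on the weighting above: the generalized Dice loss (Sudre et al., 2017)
# weights each class by 1 / V_c^2, where V_c is the class volume; this implementation
# defaults to 1 / V_c and only squares the volumes when square_volumes=True, as noted
# in the class docstring.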
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
"""
net_output must be (b, c, x, y(, z)))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z)))
:param net_output:
:param gt:
:param axes: can be (, ) = no summation
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
tn = (1 - net_output) * (1 - y_onehot)
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tn = tn ** 2
if len(axes) > 0:
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
tn = sum_tensor(tn, axes, keepdim=False)
return tp, fp, fn, tn
class SoftDiceLoss(nn.Module):
    def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):
        """
        Soft Dice loss; `smooth` is added to both the numerator and the denominator for numerical stability.
        """
super(SoftDiceLoss, self).__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False)
nominator = 2 * tp + self.smooth
denominator = 2 * tp + fp + fn + self.smooth
dc = nominator / (denominator + 1e-8)
if not self.do_bg:
if self.batch_dice:
dc = dc[1:]
else:
dc = dc[:, 1:]
dc = dc.mean()
return -dc
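# Worked example for SoftDiceLoss (illustrative only): with tp = 8, fp = 2, fn = 2 and
# smooth = 1, dice = (2*8 + 1) / (2*8 + 2 + 2 + 1) = 17 / 21, roughly 0.81, and the loss
# returned is -0.81, so minimising the loss drives the Dice coefficient towards 1.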
class MCCLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_mcc=False, do_bg=True, smooth=0.0):
"""
        Loss based on the Matthews correlation coefficient
        https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
        Warning from the original authors: this loss is very unstable in practice and does not train reliably.
"""
super(MCCLoss, self).__init__()
self.smooth = smooth
self.do_bg = do_bg
self.batch_mcc = batch_mcc
self.apply_nonlin = apply_nonlin
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
voxels = np.prod(shp_x[2:])
if self.batch_mcc:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn, tn = get_tp_fp_fn_tn(x, y, axes, loss_mask, False)
tp /= voxels
fp /= voxels
fn /= voxels
tn /= voxels
nominator = tp * tn - fp * fn + self.smooth
denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5 + self.smooth
mcc = nominator / denominator
if not self.do_bg:
if self.batch_mcc:
mcc = mcc[1:]
else:
mcc = mcc[:, 1:]
mcc = mcc.mean()
return -mcc
class SoftDiceLossSquared(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):
"""
squares the terms in the denominator as proposed by Milletari et al.
"""
super(SoftDiceLossSquared, self).__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
shp_y = y.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
with torch.no_grad():
if len(shp_x) != len(shp_y):
y = y.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(x.shape, y.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = y
else:
y = y.long()
y_onehot = torch.zeros(shp_x)
if x.device.type == "cuda":
y_onehot = y_onehot.cuda(x.device.index)
y_onehot.scatter_(1, y, 1).float()
intersect = x * y_onehot
# values in the denominator get smoothed
denominator = x ** 2 + y_onehot ** 2
# aggregation was previously done in get_tp_fp_fn, but needs to be done here now (needs to be done after
# squaring)
intersect = sum_tensor(intersect, axes, False) + self.smooth
denominator = sum_tensor(denominator, axes, False) + self.smooth
dc = 2 * intersect / denominator
if not self.do_bg:
if self.batch_dice:
dc = dc[1:]
else:
dc = dc[:, 1:]
dc = dc.mean()
return -dc
class DC_and_CE_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1,
log_dice=False, ignore_label=None):
"""
CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want.
:param soft_dice_kwargs:
:param ce_kwargs:
:param aggregate:
:param square_dice:
:param weight_ce:
:param weight_dice:
"""
super(DC_and_CE_loss, self).__init__()
if ignore_label is not None:
assert not square_dice, 'not implemented'
ce_kwargs['reduction'] = 'none'
self.log_dice = log_dice
self.weight_dice = weight_dice
self.weight_ce = weight_ce
self.aggregate = aggregate
self.ce = RobustCrossEntropyLoss(**ce_kwargs)
self.ignore_label = ignore_label
if not square_dice:
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
else:
self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
"""
target must be b, c, x, y(, z) with c=1
:param net_output:
:param target:
:return:
"""
if self.ignore_label is not None:
assert target.shape[1] == 1, 'not implemented for one hot encoding'
mask = target != self.ignore_label
target[~mask] = 0
mask = mask.float()
else:
mask = None
dc_loss = self.dc(net_output, target, loss_mask=mask) if self.weight_dice != 0 else 0
if self.log_dice:
dc_loss = -torch.log(-dc_loss)
ce_loss = self.ce(net_output, target[:, 0].long()) if self.weight_ce != 0 else 0
if self.ignore_label is not None:
ce_loss *= mask[:, 0]
ce_loss = ce_loss.sum() / mask.sum()
if self.aggregate == "sum":
result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
class ATM_and_DC_and_CE_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1, weight_atm=0.5,
log_dice=False, ignore_label=None):
"""
CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want.
:param soft_dice_kwargs:
:param ce_kwargs:
:param aggregate:
:param square_dice:
:param weight_ce:
:param weight_dice:
"""
super(ATM_and_DC_and_CE_loss, self).__init__()
if ignore_label is not None:
assert not square_dice, 'not implemented'
ce_kwargs['reduction'] = 'none'
self.log_dice = log_dice
self.weight_dice = weight_dice
self.weight_ce = weight_ce
self.aggregate = aggregate
self.ce = RobustCrossEntropyLoss(**ce_kwargs)
self.atm = ATM(apply_nonlin=softmax_helper, weight_atm=weight_atm)
self.ignore_label = ignore_label
if not square_dice:
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
else:
self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
"""
target must be b, c, x, y(, z) with c=1
:param net_output:
:param target:
:return:
"""
if self.ignore_label is not None:
assert target.shape[1] == 1, 'not implemented for one hot encoding'
mask = target != self.ignore_label
target[~mask] = 0
mask = mask.float()
else:
mask = None
net_output = net_output * self.atm(net_output, target)
dc_loss = self.dc(net_output, target, loss_mask=mask) if self.weight_dice != 0 else 0
if self.log_dice:
dc_loss = -torch.log(-dc_loss)
ce_loss = self.ce(net_output, target[:, 0].long()) if self.weight_ce != 0 else 0
if self.ignore_label is not None:
ce_loss *= mask[:, 0]
ce_loss = ce_loss.sum() / mask.sum()
if self.aggregate == "sum":
result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
class ATM(nn.Module):
    def __init__(self, apply_nonlin=None, weight_atm=0.5):
        """
        Exponential re-weighting term exp((prediction - target) / weight_atm) applied to the network output before the Dice and CE losses.
        """
super(ATM, self).__init__()
self.apply_nonlin = apply_nonlin
self.weight_atm = weight_atm
def forward(self, x, y):
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
atm = torch.exp((x-y)/self.weight_atm)
return atm
class DC_and_BCE_loss(nn.Module):
def __init__(self, bce_kwargs, soft_dice_kwargs, aggregate="sum"):
"""
DO NOT APPLY NONLINEARITY IN YOUR NETWORK!
THIS LOSS IS INTENDED TO BE USED FOR BRATS REGIONS ONLY
:param soft_dice_kwargs:
:param bce_kwargs:
:param aggregate:
"""
super(DC_and_BCE_loss, self).__init__()
self.aggregate = aggregate
self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)
self.dc = SoftDiceLoss(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)
def forward(self, net_output, target):
ce_loss = self.ce(net_output, target)
dc_loss = self.dc(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
class GDL_and_CE_loss(nn.Module):
def __init__(self, gdl_dice_kwargs, ce_kwargs, aggregate="sum"):
super(GDL_and_CE_loss, self).__init__()
self.aggregate = aggregate
self.ce = RobustCrossEntropyLoss(**ce_kwargs)
self.dc = GDL(softmax_helper, **gdl_dice_kwargs)
def forward(self, net_output, target):
dc_loss = self.dc(net_output, target)
ce_loss = self.ce(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
class DC_and_topk_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False):
super(DC_and_topk_loss, self).__init__()
self.aggregate = aggregate
self.ce = TopKLoss(**ce_kwargs)
if not square_dice:
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
else:
self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
dc_loss = self.dc(net_output, target)
ce_loss = self.ce(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later?)
return result
| [
"torch.zeros",
"torch.unbind",
"torch.no_grad",
"torch.nn.BCEWithLogitsLoss",
"torch.log",
"torch.exp"
] | 1.6.0 | Karol-G/nnUNet | a30bdbd64254c94c515ee03617173eb217eea505 |
1.7 | import torch
from torch.optim import Optimizer
class OptimWrapper(Optimizer):
# Mixin class that defines convenient functions for writing Optimizer Wrappers
def __init__(self, optim):
self.optim = optim
def __getstate__(self):
return self.optim.__getstate__()
def __setstate__(self, state):
self.optim.__setstate__(state)
@property
def state(self):
return self.optim.state
@property
def param_groups(self):
return self.optim.param_groups
@param_groups.setter
def param_groups(self, value):
self.optim.param_groups = value
def state_dict(self):
return self.optim.state_dict()
def load_state_dict(self, state_dict):
self.optim.load_state_dict(state_dict)
def zero_grad(self):
self.optim.zero_grad()
def add_param_group(self, param_group):
self.optim.add_param_group(param_group)
@property
def defaults(self):
return self.optim.defaults
@defaults.setter
def defaults(self, defaults):
self.optim.defaults = defaults
@torch.no_grad()
def step(self, closure=None):
self.optim.step(closure=closure)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.optim) | [
"torch.no_grad"
] | 1.7.1 | aknckaan/scrl | bff485e27d8785628e35d2cb73dce06f10065b1f |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CUTOFFBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_cutoffbert import CutoffBertConfig
from ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings
from ..bert.modeling_bert import BertEncoder as CutoffBertEncoder
from ..bert.modeling_bert import BertPooler as CutoffBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "CutoffBertConfig"
_TOKENIZER_FOR_DOC = "CutoffBertTokenizer"
CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class CutoffBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = CutoffBertConfig
load_tf_weights = load_tf_weights_in_cutoffbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
CUTOFFBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
CUTOFFBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertModel(CutoffBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = CutoffBertEmbeddings(config)
self.encoder = CutoffBertEncoder(config)
self.pooler = CutoffBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
CutoffBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled
output) + Cut-off data augmentation support.
""",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertForSequenceClassification(CutoffBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_token_id = config.cls_token_id
self.sep_token_id = config.sep_token_id
self.mask_token_id = config.mask_token_id
self.masking_prob = config.cutoff_masking_prob
self.temperature = config.cutoff_temperature
self.mask_loss_wgt = config.cutoff_mask_loss_wgt
self.js_loss_wgt = config.cutoff_js_loss_wgt
self.config = config
self.bert = CutoffBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def _apply_cutoff(self, inputs):
masked_inputs = inputs.clone()
valid_masking_indices = (inputs != self.cls_token_id) & (inputs != self.sep_token_id)
random_masking_indices = torch.bernoulli(torch.full(inputs.shape, self.masking_prob, device=inputs.device)).bool()
masking_indices = random_masking_indices & valid_masking_indices
masked_inputs[masking_indices] = self.mask_token_id
return masked_inputs
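    # Added note on the cutoff augmentation above: every token that is not [CLS] or [SEP]
    # is independently replaced by [MASK] with probability `masking_prob`, giving a
    # corrupted view of the input; forward() pairs this view with the original one for the
    # optional masked-input loss and the Jensen-Shannon consistency loss below.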
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.dropout(outputs[1])
logits = self.classifier(pooled_output)
if not return_dict:
return (logits,) + outputs[2:]
return SequenceClassifierOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
b, l = input_ids.size()
masked_input_ids = self._apply_cutoff(input_ids.clone())
flatten_input_ids = torch.stack((input_ids, masked_input_ids), dim=1).reshape(-1, l)
flatten_attention_mask = attention_mask.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.unsqueeze(1).expand(-1, 2, -1, -1).reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.dropout(flatten_outputs[1])
flatten_logits = self.classifier(flatten_pooled_output)
logits, masked_logits = flatten_logits.reshape(b, 2, self.config.num_labels).chunk(2, dim=1)
logits, masked_logits = logits.squeeze(dim=1).contiguous(), masked_logits.squeeze(dim=1).contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if self.mask_loss_wgt is not None and self.mask_loss_wgt > 0.0:
mask_loss = loss_fct(masked_logits.view(-1, self.num_labels), labels.view(-1))
loss += mask_loss * self.mask_loss_wgt
if self.js_loss_wgt is not None and self.js_loss_wgt > 0.0:
kl_loss_fct = KLDivLoss(reduction="batchmean")
src_logits, trg_logits = logits, masked_logits
mean_logits = (src_logits + trg_logits) * 0.5
src_loss = kl_loss_fct(
F.log_softmax(src_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
trg_loss = kl_loss_fct(
F.log_softmax(trg_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
js_loss = (src_loss + trg_loss) * 0.5
loss += js_loss * self.js_loss_wgt
if not return_dict:
return (loss, logits)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
)
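# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original model): a minimal, standalone
# illustration of the symmetric-KL ("JS-style") consistency term assembled in
# the forward pass above. The tensor shapes and the temperature `tau` are
# placeholder values chosen only for this example.
def _js_consistency_sketch(tau=1.0):
    kl_loss_fct = KLDivLoss(reduction="batchmean")
    src_logits = torch.randn(4, 3)   # logits from the original inputs
    trg_logits = torch.randn(4, 3)   # logits from the cutoff-augmented inputs
    mean_logits = (src_logits + trg_logits) * 0.5
    src_loss = kl_loss_fct(
        F.log_softmax(src_logits / tau, dim=-1),
        F.softmax(mean_logits / tau, dim=-1),
    ) * (tau ** 2)
    trg_loss = kl_loss_fct(
        F.log_softmax(trg_logits / tau, dim=-1),
        F.softmax(mean_logits / tau, dim=-1),
    ) * (tau ** 2)
    return (src_loss + trg_loss) * 0.5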
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.stack",
"torch.from_numpy",
"torch.ones",
"torch.nn.functional.log_softmax",
"torch.full",
"torch.nn.KLDivLoss",
"torch.nn.functional.softmax",
"torch.nn.CrossEntropyLoss"
] | 1.0 | stevezheng23/fewshot_nlp_pt | aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2 |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch PROMPTBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_promptbert import PromptBertConfig
from ..bert.modeling_bert import BertEmbeddings as PromptBertEmbeddings
from ..bert.modeling_bert import BertEncoder as PromptBertEncoder
from ..bert.modeling_bert import BertPooler as PromptBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "PromptBertConfig"
_TOKENIZER_FOR_DOC = "PromptBertTokenizer"
PROMPTBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_promptbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class PromptBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = PromptBertConfig
load_tf_weights = load_tf_weights_in_promptbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
PROMPTBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
PROMPTBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare PromptBert Model transformer outputting raw hidden-states without any specific head on top.",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertModel(PromptBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = PromptBertEmbeddings(config)
self.encoder = PromptBertEncoder(config)
self.pooler = PromptBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
PromptBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled output).
""",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertForSequenceClassification(PromptBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = PromptBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a dual encoder head on top for passage retrieval tasks (a linear layer on top of the pooled output
for computing source-target similarity).
""",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertForDualPassageEncoder(PromptBertPreTrainedModel):
def __init__(self, config, cls_loss_wgt=None):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_loss_wgt = cls_loss_wgt
self.bert = PromptBertModel(config)
self.pooler = PromptBertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if self.cls_loss_wgt is not None and cls_loss_wgt > 0.0:
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, 2, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=DualPassageEncoderModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None or len(input_ids.size()) < 3:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.pooler(outputs[0])
pooled_output = self.dropout(pooled_output)
if not return_dict:
return (pooled_output,) + outputs[2:]
return DualPassageEncoderModelOutput(
pooled_output=pooled_output,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
b, _, l = input_ids.size()
flatten_input_ids = input_ids.reshape(-1, l)
flatten_attention_mask = attention_mask.reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.pooler(flatten_outputs[0])
src_pooled_output, trg_pooled_output = flatten_pooled_output.reshape(b, 2, self.config.hidden_size).chunk(2, dim=1)
src_pooled_output, trg_pooled_output = src_pooled_output.squeeze(dim=1).contiguous(), trg_pooled_output.squeeze(dim=1).contiguous()
mask = (labels.unsqueeze(-1).expand(-1, b) == labels.unsqueeze(0).expand(b, -1)) & (1 - torch.eye(b)).to(labels.device).bool()
cl_logits = torch.einsum('ik,jk->ij', src_pooled_output, trg_pooled_output).masked_fill(mask, float('-inf'))
cl_labels = torch.arange(b).to(labels.device)
loss_fct = CrossEntropyLoss()
cl_loss = loss_fct(cl_logits.view(-1, labels.size(0)), cl_labels.view(-1))
if self.cls_loss_wgt is not None and self.cls_loss_wgt > 0.0:
flatten_logits = self.classifier(self.dropout(flatten_outputs[1]))
src_logits, trg_logits = flatten_logits.reshape(b, 2, self.num_labels).chunk(2, dim=1)
src_logits, trg_logits = src_logits.squeeze(dim=1).contiguous(), trg_logits.squeeze(dim=1).contiguous()
src_loss = loss_fct(src_logits.view(-1, self.num_labels), labels.view(-1))
trg_loss = loss_fct(trg_logits.view(-1, self.num_labels), labels.view(-1))
cls_loss = src_loss + trg_loss
cls_logits = src_logits + trg_logits
loss = cl_loss + cls_loss * self.cls_loss_wgt
logits = cls_logits
else:
loss = cl_loss
logits = cl_logits
if not return_dict:
return (loss, logits,)
return DualPassageEncoderModelOutput(
loss=loss,
logits=logits,
)
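# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original model): a standalone illustration
# of the in-batch contrastive objective used by the dual passage encoder
# above -- each source embedding is trained to score highest against its own
# target embedding. The batch size and hidden size below are placeholders.
def _in_batch_contrastive_sketch():
    b, d = 4, 8
    src_pooled = torch.randn(b, d)                                  # source-passage embeddings
    trg_pooled = torch.randn(b, d)                                  # target-passage embeddings
    cl_logits = torch.einsum('ik,jk->ij', src_pooled, trg_pooled)   # [b, b] similarity matrix
    cl_labels = torch.arange(b)                                     # positives lie on the diagonal
    return CrossEntropyLoss()(cl_logits, cl_labels)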
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.einsum",
"torch.arange",
"torch.from_numpy",
"torch.ones",
"torch.eye",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.CrossEntropyLoss"
] | 1.0 | stevezheng23/fewshot_nlp_pt | aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2 |
1.5 | import torch
import numpy as np
def get_sigmas(config):
if config.model.sigma_dist == 'geometric':
sigmas = torch.tensor(
np.exp(np.linspace(np.log(config.model.sigma_begin), np.log(config.model.sigma_end),
config.model.num_classes))).float().to(config.device)
elif config.model.sigma_dist == 'uniform':
sigmas = torch.tensor(
np.linspace(config.model.sigma_begin, config.model.sigma_end, config.model.num_classes)
).float().to(config.device)
else:
raise NotImplementedError('sigma distribution not supported')
return sigmas
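# Editor's sketch (not from the original repo): the two schedules above differ
# only in spacing -- geometric decays multiplicatively, uniform decays
# linearly. The endpoint values and count below are arbitrary placeholders.
def _sigma_schedule_sketch():
    geometric = np.exp(np.linspace(np.log(1.0), np.log(0.01), 5))   # ~[1.0, 0.32, 0.1, 0.032, 0.01]
    uniform = np.linspace(1.0, 0.01, 5)                             # [1.0, 0.7525, 0.505, 0.2575, 0.01]
    return geometric, uniform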
@torch.no_grad()
def anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False, denoise=True, add_noise=True):
images = []
with torch.no_grad():
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c #dummy target 1...T depending on iteration
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
grad = scorenet(x_mod, labels)
#choose whether to add random noise during each gradient ascent step
if add_noise:
noise = torch.randn_like(x_mod)
else:
noise = torch.zeros_like(x_mod)
#calculate l2 norms of gradient (score) and the additive noise for logging
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
#calc snr as scaled version of [||s(x, \sigma_i)|| / ||z_t||] and mean of score for logging
snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print("level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}".format(
c, step_size, grad_norm.item(), image_norm.item(), snr.item(), grad_mean_norm.item()))
#final denoising step if desired - removes the very last additive z_L
if denoise:
last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)
last_noise = last_noise.long()
x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)
images.append(x_mod.to('cpu'))
if final_only:
return [x_mod.to('cpu')]
else:
return images
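# Editor's sketch (not from the original repo): hedged usage example for the
# sampler above. A real run needs a trained NCSN-style score network; the
# lambda below only stands in for its (x, labels) call signature, and the
# image shape / step sizes are placeholders.
def _langevin_sampling_sketch():
    sigmas = torch.tensor(np.geomspace(1.0, 0.01, 10)).float()
    x_init = torch.rand(2, 3, 32, 32)                        # start from noise
    dummy_scorenet = lambda x, labels: torch.zeros_like(x)   # placeholder score model
    samples = anneal_Langevin_dynamics(x_init, dummy_scorenet, sigmas,
                                       n_steps_each=5, step_lr=1e-5,
                                       final_only=True, denoise=True)
    return samples[-1]   # [2, 3, 32, 32] tensor on CPU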
@torch.no_grad()
def langevin_Inverse(x_mod, y, A, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False, denoise=True, add_noise=True,
decimate_sigma=None, mode=None, true_x=None):
images = []
#if desired, decimate the number of noise scales to speed up inference
if decimate_sigma is not None:
sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() #grab every decimate_sigma'th value except the last one
sigmas_temp.append(sigmas[-1]) #add the last sigma value back to the list
# num_sigmas = sigmas.shape[0] // decimate_sigma
# sigmas_temp = []
# for i in range(num_sigmas):
# sigmas_temp.append(sigmas[-1])
sigmas = sigmas_temp #swap the new decimated sigma list for the main one
mse = torch.nn.MSELoss()
N, C, H, W = x_mod.shape
steps = np.geomspace(start=5, stop=1, num=len(sigmas))
c2 = 1
with torch.no_grad():
#outer loop over noise scales
for c, sigma in enumerate(sigmas):
#dummy target 1...T depending on iteration
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
#step_size = step_lr * (sigma / sigmas[-1]) ** 2
step_size = steps[c]
#Inner loop over T
for s in range(n_steps_each):
#s(x_t) ~= \grad_x log p(x) -- THE PRIOR
grad = scorenet(x_mod, labels)
prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
#prior_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2
#calculate the maximum likelihood gradient - i.e. MSE gradient
#A should be [N, m, C * H * W], x should be [N, C, H, W], y should be [N, m, 1]
if mode=='denoising':
Axt = x_mod
mle_grad = (Axt - y) * (1 / N) #for denoising, y has same dimension as x
else:
Axt = torch.matmul(A, x_mod.view(N, -1, 1))
mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 #MSE gradient
#mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * (1 / N) #L1 error gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
#likelihood_mean_norm = torch.norm(mle_grad.mean(dim=0).view(-1)) ** 2
if c == 0 and s == 0:
c2 = prior_norm.item() / likelihood_norm.item()
mle_grad = mle_grad * c2 #MSE gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
#The final gradient
grad = grad - mle_grad
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
#grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2
#choose whether to add random noise during each gradient ascent step
if add_noise:
noise = torch.randn_like(x_mod)
else:
noise = torch.zeros_like(x_mod)
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
snr = np.sqrt(step_size / 2.) * prior_norm / noise_norm
mse_iter = mse(Axt, y)
if true_x is not None:
mse_true = mse(true_x, x_mod)
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print("\nlevel: {}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \
image_norm: {:.4f}, train_mse: {:.4f}".format( \
c, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \
mse_iter.item()))
if true_x is not None:
print("true_mse: {:.4f}".format(mse_true.item()))
#final denoising step if desired - removes the very last additive z_L
if denoise:
last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)
last_noise = last_noise.long()
x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)
images.append(x_mod.to('cpu'))
if final_only:
return [x_mod.to('cpu')]
else:
return images
@torch.no_grad()
def inverse_solver(x_mod, y, A, scorenet, sigmas, lr = [5, 1], c1=1, c2=1, auto_c2=True,
final_only=False, verbose=False, likelihood_every=1,
decimate_sigma=None, mode=None, true_x=None, sigma_type = 'subsample', likelihood_type="l2"):
images = []
#if desired, decimate the number of noise scales to speed up inference
if decimate_sigma is not None:
if sigma_type == 'subsample': #grab equally-spaced sigma values
sigmas_temp = sigmas[0:-1:decimate_sigma].tolist()
sigmas_temp.append(sigmas[-1])
elif sigma_type == 'last': #grab just the last sigma value multiple times
num_sigmas = sigmas.shape[0] // decimate_sigma
sigmas_temp = []
for i in range(num_sigmas):
sigmas_temp.append(sigmas[-1])
else:
sigmas_temp = sigmas
sigmas = sigmas_temp
mse = torch.nn.MSELoss()
N, C, H, W = x_mod.shape
steps = np.geomspace(start=lr[0], stop=lr[1], num=len(sigmas))
likelihood_norm = 0
with torch.no_grad():
if sigma_type == 'last':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * 1099
labels = labels.long()
for c, sigma in enumerate(sigmas):
if sigma_type == 'subsample':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * decimate_sigma * c
labels = labels.long()
elif sigma_type != 'last':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = steps[c]
#s(x_t) ~= \grad_x log p(x) -- THE PRIOR
grad = scorenet(x_mod, labels) * c1
prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
if c % likelihood_every == 0:
#\grad_x log p(y | x) -- LIKELIHOOD
if mode=='denoising':
Axt = x_mod
if likelihood_type == "l2":
mle_grad = (Axt - y) * c2
elif likelihood_type == "l1":
mle_grad = torch.sign(Axt - y) * c2
else:
Axt = torch.matmul(A, x_mod.view(N, -1, 1))
if likelihood_type == "l2":
mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2
elif likelihood_type == "l1":
mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * c2
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
if auto_c2 and c == 0:
c2 = prior_norm.item() / likelihood_norm.item()
mle_grad = mle_grad * c2 #MSE gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
grad = grad - mle_grad
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad
#x_mod = torch.clamp(x_mod, 0.0, 1.0)
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
mse_iter = mse(Axt, y)
if true_x is not None:
mse_true = mse(true_x, x_mod)
if not final_only:
images.append(x_mod.cpu())
if verbose:
print("\n iteration: {}, sigma: {:.4f}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \
image_norm: {:.4f}, train_mse: {:.4f}".format( \
c, sigma, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \
mse_iter.item()))
if true_x is not None:
print("true_mse: {:.4f}".format(mse_true.item()))
if final_only:
return [x_mod.to('cpu')]
else:
return images
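# Editor's sketch (not from the original repo): hedged usage example for
# inverse_solver in plain denoising mode. The zero-score lambda only stands in
# for a trained score network, so the update is driven by the likelihood term
# alone; all shapes and constants are placeholders.
def _inverse_solver_sketch():
    sigmas = torch.tensor(np.geomspace(1.0, 0.01, 10)).float()
    x_true = torch.rand(2, 3, 32, 32)
    y = x_true + 0.1 * torch.randn_like(x_true)              # noisy observation
    dummy_scorenet = lambda x, labels: torch.zeros_like(x)   # placeholder prior
    recon = inverse_solver(y.clone(), y, None, dummy_scorenet, sigmas,
                           lr=[0.5, 0.1], auto_c2=False, c2=1.0,
                           mode='denoising', sigma_type='last',
                           final_only=True, true_x=x_true)
    return recon[-1]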
@torch.no_grad()
def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,
n_steps_each=100, step_lr=0.000008):
"""
Currently only good for 32x32 images. Assuming the right half is missing.
"""
images = []
#refer_image is the untainted x (?)
#right now this only works with 3-channel images
refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)
refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)
x_mod = x_mod.view(-1, 3, image_size, image_size)
cols = image_size // 2
half_refer_image = refer_image[..., :cols]
with torch.no_grad():
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
images.append(x_mod.to('cpu'))
corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma
x_mod[:, :, :, :cols] = corrupted_half_image
noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)
grad = scorenet(x_mod, labels)
x_mod = x_mod + step_size * grad + noise
print("class: {}, step_size: {}, mean {}, max {}".format(c, step_size, grad.abs().mean(),
grad.abs().max()))
return images
@torch.no_grad()
def anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False):
images = []
n_rows = x_mod.shape[0]
x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1)
x_mod = x_mod.reshape(-1, *x_mod.shape[2:])
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
grad = scorenet(x_mod, labels)
noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
device=x_mod.device)
noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
device=x_mod.device)
angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device)
noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \
noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None]
noise = noise.reshape(-1, *noise.shape[2:])
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)
snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print(
"level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}".format(
c, step_size, image_norm.item(), grad_norm.item(), snr.item()))
if final_only:
return [x_mod.to('cpu')]
else:
return images | [
"torch.cos",
"torch.nn.MSELoss",
"torch.sin",
"torch.no_grad",
"torch.linspace",
"torch.sign",
"torch.ones",
"torch.randn_like",
"torch.zeros_like",
"torch.transpose",
"torch.randn"
] | 1.5.0 | Sriram-Ravula/ncsnv2 | f610b59441a34063fae1c02aa06837b7eec95c03 |
0.4 | from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
import numpy as np
import warnings
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs, lr_scheduler_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.losses import CrossEntropyLoss, DeepSupervision
from torchreid.utils.iotools import check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger, RankLogger
from torchreid.utils.torchtools import count_num_param, open_all_layers, open_specified_layers, accuracy, \
load_pretrained_weights, save_checkpoint, resume_from_checkpoint
from torchreid.utils.reidtools import visualize_ranked_results
from torchreid.utils.generaltools import set_random_seed
from torchreid.eval_metrics import evaluate
from torchreid.optimizers import init_optimizer
from torchreid.lr_schedulers import init_lr_scheduler
os.environ['TORCH_HOME'] = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.torch'))
testloader_dict = trainloader = criterion = None
use_gpu = False
# global variables
parser = argument_parser()
args = parser.parse_args()
def corr_metric(W: 'K x N'):
G = W.permute(1, 0) @ W
return torch.trace(G) / abs(G).sum()
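# Editor's sketch (not part of the original script): quick sanity check of the
# metric above. For an orthonormal W the Gram matrix G = W^T W is the identity,
# so the metric is 1.0; for fully correlated columns (an all-ones W) it drops
# to 0.25 here. The 4x4 sizes are placeholders.
def _corr_metric_sketch():
    return corr_metric(torch.eye(4)), corr_metric(torch.ones(4, 4))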
def replace_weight(layer):
with torch.no_grad():
# NECESSARY! The weight of Linear layer has been transposed!
A = layer.weight.t()
M, N = A.size()
        # M: 2048
        # N: 1024
U, S, V = torch.svd(A, some=False)
W = A @ V
        # W: '2048 x 1024 = M x N'
NW = torch.zeros_like(A)
for i in range(N):
curr_N = W.size(1)
W_norm = torch.norm(W, p=2, dim=0)
            # W_norm: 'curr_N'
index = i
vec_i = A[:, i]
vec_i_norm = torch.norm(vec_i)
co = (A[:, i].view(M, 1).t() @ W).view(curr_N)
            # co: 'curr_N'
co = co / vec_i_norm
absco = abs(co / W_norm)
maxco_index = torch.max(absco, 0)[1].item()
NW[:, index] = W[:, maxco_index] * torch.sign(co[maxco_index])
# Remove selected column vector from W
W = W[:, sorted({x for x in range(curr_N) if x != maxco_index})]
layer.weight.copy_(NW.t())
print(layer.weight)
return layer
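# Editor's sketch (not part of the original script): hedged usage example for
# the SVD-based weight replacement above, applied to a small stand-in
# nn.Linear layer. Layer sizes are placeholders; the real script applies it to
# the model's eigen (fc) layers between RRI stages.
def _replace_weight_sketch():
    eigen_layer = nn.Linear(32, 16, bias=False)
    before = corr_metric(eigen_layer.weight.t())
    replace_weight(eigen_layer)          # decorrelates the weight columns in place
    after = corr_metric(eigen_layer.weight.t())
    return before, after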
def main():
global args, criterion, testloader_dict, trainloader, use_gpu
set_random_seed(args.seed)
if not args.use_avai_gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu:
use_gpu = False
log_name = 'test.log' if args.evaluate else 'train.log'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print('==========\nArgs:{}\n=========='.format(args))
if use_gpu:
print('Currently using GPU {}'.format(args.gpu_devices))
cudnn.benchmark = True
else:
warnings.warn('Currently using CPU, however, GPU is highly recommended')
print('Initializing image data manager')
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, testloader_dict = dm.return_dataloaders()
print('Initializing model: {}'.format(args.arch))
model = models.init_model(name=args.arch, num_classes=dm.num_train_pids, loss={'xent'}, pretrained=not args.no_pretrained, use_gpu=use_gpu)
print('Model size: {:.3f} M'.format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
load_pretrained_weights(model, args.load_weights)
model = nn.DataParallel(model).cuda() if use_gpu else model
criterion = CrossEntropyLoss(num_classes=dm.num_train_pids, use_gpu=use_gpu, label_smooth=args.label_smooth)
if args.resume and check_isfile(args.resume):
args.start_epoch = resume_from_checkpoint(args.resume, model, optimizer=None)
resumed = True
else:
resumed = False
if args.evaluate:
print('Evaluate only')
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(model, queryloader, galleryloader, use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(
distmat, dm.return_testdataset_by_name(name),
save_dir=osp.join(args.save_dir, 'ranked_results', name),
topk=20
)
return
time_start = time.time()
# ranklogger = RankLogger(args.source_names, args.target_names)
print('=> Start training')
if not resumed:
train_base(model)
train_RRI(model, 7)
elapsed = round(time.time() - time_start)
elapsed = str(datetime.timedelta(seconds=elapsed))
print('Elapsed {}'.format(elapsed))
# ranklogger.show_summary()
def train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=False):
losses = AverageMeter()
accs = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
model.train()
# if fixbase or args.always_fixbase:
# open_specified_layers(model, args.open_layers)
# else:
# open_all_layers(model)
end = time.time()
for batch_idx, (imgs, pids, _, _) in enumerate(trainloader):
data_time.update(time.time() - end)
if use_gpu:
imgs, pids = imgs.cuda(), pids.cuda()
outputs = model(imgs)
loss = sum(criterion(x, pids) for x in outputs) / len(outputs)
# if isinstance(outputs, (tuple, list)):
# loss = DeepSupervision(criterion, outputs, pids)
# else:
# loss = criterion(outputs, pids)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
losses.update(loss.item(), pids.size(0))
accs.update(accuracy(outputs, pids)[0])
if (batch_idx + 1) % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc {acc.val:.2f} ({acc.avg:.2f})\t'.format(
epoch + 1, batch_idx + 1, len(trainloader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
acc=accs
))
end = time.time()
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
batch_time = AverageMeter()
model.eval()
with torch.no_grad():
qf, q_pids, q_camids = [], [], []
for batch_idx, (imgs, pids, camids, _) in enumerate(queryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
qf.append(features)
q_pids.extend(pids)
q_camids.extend(camids)
qf = torch.cat(qf, 0)
q_pids = np.asarray(q_pids)
q_camids = np.asarray(q_camids)
print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1)))
gf, g_pids, g_camids = [], [], []
end = time.time()
for batch_idx, (imgs, pids, camids, _) in enumerate(galleryloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
gf.append(features)
g_pids.extend(pids)
g_camids.extend(camids)
gf = torch.cat(gf, 0)
g_pids = np.asarray(g_pids)
g_camids = np.asarray(g_camids)
print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1)))
print('=> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, args.test_batch_size))
m, n = qf.size(0), gf.size(0)
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(1, -2, qf, gf.t())
distmat = distmat.numpy()
print('Computing CMC and mAP')
cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03)
print('Results ----------')
print('mAP: {:.1%}'.format(mAP))
print('CMC curve')
for r in ranks:
print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
print('------------------')
if return_distmat:
return distmat
return cmc[0]
def get_base_optimizer(model):
kwargs = {
'weight_decay': 5e-4,
'lr': 0.0003,
'betas': (0.9, 0.999),
}
param_groups = model.parameters()
optimizer = torch.optim.Adam(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[20, 40], gamma=0.1)
return optimizer, scheduler
def get_base_sgd_optimizer(model):
kwargs = {
'weight_decay': 5e-4,
'lr': 0.001,
'momentum': 0.9,
}
param_groups = model.parameters()
optimizer = torch.optim.SGD(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[25, 50], gamma=0.1)
return optimizer, scheduler
def get_RRI_optimizer(
model,
lr
):
kwargs = {
'weight_decay': 5e-4,
'lr': lr,
'momentum': 0.9,
}
param_groups = model.parameters()
optimizer = torch.optim.SGD(param_groups, **kwargs)
scheduler = init_lr_scheduler(optimizer, stepsize=[12], gamma=0.1)
return optimizer, scheduler
def train_R(model, lr, T, fix_eigen_layer: bool=False):
eigen_layers = model.module.get_fcs()
if fix_eigen_layer:
for eigen_layer in eigen_layers:
eigen_layer.eval()
for p in eigen_layer.parameters():
p.requires_grad = False
stage_name = 'restraint'
else:
model.train()
for p in model.parameters():
p.requires_grad = True
stage_name = 'relaxation'
prefix = '{}_{}_'.format(T, stage_name)
optimizer, scheduler = get_RRI_optimizer(model, lr)
for epoch in range(20):
train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)
scheduler.step()
print('=> Test')
if (epoch + 1) % args.eval_freq == 0:
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': (),
}, args.save_dir, prefix=prefix)
def train_base(model):
use_sgd = os.environ.get('sgd') is not None
optimizer_getter = get_base_sgd_optimizer if use_sgd else get_base_optimizer
optimizer, scheduler = get_base_optimizer(model)
model.train()
print('=== train base ===')
if True:
open_layers = ['fc', 'classifier1', 'classifier2_1', 'classifier2_2', 'fc2_1', 'fc2_2', 'reduction', 'classifier']
print('Train {} for {} epochs while keeping other layers frozen'.format(open_layers, 10))
for epoch in range(10):
open_specified_layers(model, open_layers)
train(epoch, model, criterion, optimizer, trainloader, use_gpu, fixbase=True)
print('Done. All layers are open to train for {} epochs'.format(60))
open_all_layers(model)
optimizer, scheduler = optimizer_getter(model)
for epoch in range(60):
train(epoch, model, criterion, optimizer, trainloader, use_gpu=use_gpu)
scheduler.step()
print('=> Test')
if (epoch + 1) % args.eval_freq == 0:
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': optimizer.state_dict(),
}, args.save_dir, prefix='base_')
def train_RRI(model, Ts: int=7):
base_lrs = [0.001] * 3 + [0.0001] * 10
for T in range(Ts):
print('=== T = {} ==='.format(T))
print('Replacing eigen layer weight...')
for eigen_layer in model.module.get_fcs():
replace_weight(eigen_layer)
print('Replaced.')
print('--- Restraint ({}) ---'.format(T))
train_R(model, base_lrs[T], T, fix_eigen_layer=True)
print('--- Relaxation ({}) ---'.format(T))
train_R(model, base_lrs[T], T, fix_eigen_layer=False)
for name in args.target_names:
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
rank1 = test(model, queryloader, galleryloader, use_gpu)
save_checkpoint({
'state_dict': model.state_dict(),
'rank1': rank1,
'epoch': 0,
'arch': args.arch,
'optimizer': (),
}, args.save_dir, prefix='final_')
if __name__ == '__main__':
main()
| [
"torch.cat",
"torch.trace",
"torch.max",
"torch.norm",
"torch.no_grad",
"torch.optim.Adam",
"torch.optim.SGD",
"torch.sign",
"torch.pow",
"torch.cuda.is_available",
"torch.zeros_like",
"torch.nn.DataParallel",
"torch.svd"
] | 0.4.1 | hsfzxjy/svdnet-pytorch | 8f485d0b162c23b20449f7ee80c955e0b20950ae |
1.13 | import logging
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from torch.utils.data.distributed import DistributedSampler
from .dataset import CheXpert
def _get_mean_and_std(dataset: Dataset):
"""Compute the mean and std of dataset."""
data_loader = DataLoader(dataset, batch_size=1, shuffle=False)
mean = torch.zeros(3)
std = torch.zeros(3)
for i, (img, _) in enumerate(data_loader):
if i % 1000 == 0:
print(i)
mean += img.mean(dim=(0, 2, 3))
std += img.std(dim=(0, 2, 3))
mean /= len(data_loader)
std /= len(data_loader)
return mean, std
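# Editor's sketch (not part of the original loader): hedged usage example for
# the helper above. A tiny in-memory TensorDataset stands in for CheXpert just
# to show the expected (image, label) iteration contract; sizes are
# placeholders.
def _mean_std_sketch():
    from torch.utils.data import TensorDataset
    imgs = torch.rand(8, 3, 32, 32)
    labels = torch.zeros(8, dtype=torch.long)
    return _get_mean_and_std(TensorDataset(imgs, labels))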
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.0
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
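# Editor's sketch (not part of the original loader): hedged usage example for
# the Cutout augmentation above -- it zeroes one square patch (clipped at the
# borders) of a CHW tensor. The image size and patch length are placeholders.
def _cutout_sketch():
    img = torch.rand(3, 32, 32)
    return Cutout(length=8)(img)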
def _data_transforms_chexpert():
CHEXPERT_MEAN = [0.503, 0.503, 0.503]
CHEXPERT_STD = [0.291, 0.291, 0.291]
image_size = 256
train_transform = transforms.Compose(
[
# transforms.ToPILImage(),
transforms.RandomResizedCrop(image_size),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD),
]
)
# train_transform.transforms.append(Cutout(16))
test_transform = transforms.Compose(
[
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD),
]
)
return train_transform, test_transform
# for centralized training
def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, policy="zeros"):
return get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs, policy=policy)
# for local devices
def get_dataloader_test(dataset, datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy="zeros"):
return get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy=policy)
def get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs=None, policy="zeros"):
dl_obj = CheXpert
transform_train, transform_test = _data_transforms_chexpert()
train_ds = dl_obj(
datadir,
dataidxs=dataidxs,
train=True,
transform=transform_train,
download=False,
policy=policy,
)
test_ds = dl_obj(
datadir,
dataidxs=None,
train=False,
transform=transform_test,
download=False,
policy=policy,
)
train_dl = DataLoader(
dataset=train_ds,
batch_size=train_bs,
shuffle=True,
drop_last=False,
pin_memory=True,
num_workers=4,
)
test_dl = DataLoader(
dataset=test_ds,
batch_size=test_bs,
shuffle=False,
drop_last=False,
pin_memory=True,
num_workers=4,
)
return train_dl, test_dl
def get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train=None, dataidxs_test=None, policy="zeros"):
dl_obj = CheXpert
transform_train, transform_test = _data_transforms_chexpert()
train_ds = dl_obj(
datadir,
dataidxs=dataidxs_train,
train=True,
transform=transform_train,
download=True,
policy=policy,
)
test_ds = dl_obj(
datadir,
dataidxs=dataidxs_test,
train=False,
transform=transform_test,
download=True,
policy=policy,
)
train_dl = DataLoader(
dataset=train_ds,
batch_size=train_bs,
shuffle=True,
drop_last=False,
pin_memory=True,
num_workers=4,
)
test_dl = DataLoader(
dataset=test_ds,
batch_size=test_bs,
shuffle=False,
drop_last=False,
pin_memory=True,
num_workers=4,
)
return train_dl, test_dl
def distributed_centralized_chexpert_loader(dataset, data_dir, world_size, rank, batch_size):
"""
Used for generating distributed dataloader for
accelerating centralized training
"""
train_bs = batch_size
test_bs = batch_size
transform_train, transform_test = _data_transforms_chexpert()
train_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=True, transform=transform_train)
test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test)
train_sam = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank)
test_sam = DistributedSampler(test_dataset, num_replicas=world_size, rank=rank)
    train_dl = DataLoader(
train_dataset,
batch_size=train_bs,
sampler=train_sam,
pin_memory=True,
num_workers=4,
)
    test_dl = DataLoader(
test_dataset,
batch_size=test_bs,
sampler=test_sam,
pin_memory=True,
num_workers=4,
)
class_num = 1000
train_data_num = len(train_dataset)
test_data_num = len(test_dataset)
return train_data_num, test_data_num, train_dl, test_dl, None, None, None, class_num
def load_partition_data_chexpert(
data_dir,
partition_method="random",
partition_alpha=None,
client_number=100,
batch_size=10,
policy="zeros",
):
transform_train, transform_test = _data_transforms_chexpert()
train_dataset = CheXpert(
data_dir=data_dir,
dataidxs=None,
train=True,
transform=transform_train,
policy=policy,
)
test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test, policy=policy)
# get local dataset
if partition_method == "random":
num_train_items = int(len(train_dataset) / client_number)
num_test_items = int(len(test_dataset) / client_number)
dict_client = {}
all_train_idxs = list(range(len(train_dataset)))
all_test_idxs = list(range(len(test_dataset)))
for client_idx in range(client_number):
dict_client[client_idx] = {}
dict_client[client_idx]["train"] = set(np.random.choice(all_train_idxs, num_train_items, replace=False))
dict_client[client_idx]["test"] = set(np.random.choice(all_test_idxs, num_test_items, replace=False))
all_train_idxs = list(set(all_train_idxs) - dict_client[client_idx]["train"])
all_test_idxs = list(set(all_test_idxs) - dict_client[client_idx]["test"])
if len(all_train_idxs) > 0:
all_client_idxs = list(range(client_number))
np.random.shuffle(all_client_idxs)
choiced_client_idxs = all_client_idxs[: len(all_train_idxs)]
for idx, client_idx in enumerate(choiced_client_idxs):
dict_client[client_idx]["train"].add(all_train_idxs[idx])
if len(all_test_idxs) > 0:
all_client_idxs = list(range(client_number))
np.random.shuffle(all_client_idxs)
choiced_client_idxs = all_client_idxs[: len(all_test_idxs)]
for idx, client_idx in enumerate(choiced_client_idxs):
dict_client[client_idx]["test"].add(all_test_idxs[idx])
else:
raise NotImplementedError
# build dataloader
train_dl = []
test_dl = []
for client_idx in range(client_number):
train_data_idxs = list(dict_client[client_idx]["train"])
test_data_idxs = list(dict_client[client_idx]["test"])
train_dl_, test_dl_ = get_dataloader_test_chexpert(
datadir=data_dir,
dataidxs_train=train_data_idxs,
dataidxs_test=test_data_idxs,
train_bs=batch_size,
test_bs=batch_size,
policy=policy,
)
train_dl.append(train_dl_)
test_dl.append(test_dl_)
logging.info(f"Client {client_idx} train data num: {len(train_dl_)} test data num: {len(test_dl_)}")
logging.info("Partition data done")
# logging.info("Partition data for each client: {}".format(dict_client))
train_data_num = len(train_dataset)
test_data_num = len(test_dataset)
train_data_global = train_dataset
test_data_global = test_dataset
data_local_num_dict = {
client_idx: len(dict_client[client_idx]["train"]) + len(dict_client[client_idx]["test"])
for client_idx in range(client_number)
}
train_data_local_dict = {client_idx: train_dl_ for client_idx, train_dl_ in enumerate(train_dl)}
test_data_local_dict = {client_idx: test_dl_ for client_idx, test_dl_ in enumerate(test_dl)}
class_num = train_dataset.num_classes
return (
train_data_num,
test_data_num,
train_data_global,
test_data_global,
data_local_num_dict,
train_data_local_dict,
test_data_local_dict,
class_num,
)
if __name__ == "__main__":
data_path = os.path.join("D:\\", "dataset", "CheXpert", "CheXpert-v1.0-small")
data = CheXpert(data_dir=data_path, transform=transforms.ToTensor())
print(len(data))
print(data[0][0])
print(data[0][1])
# mean, std = _get_mean_and_std(data)
# print(mean, std)
# train_transform, valid_transform = _data_transforms_chexpert()
# print(train_transform)
# print(valid_transform)
(
train_data_num,
test_data_num,
train_data_global,
test_data_global,
data_local_num_dict,
train_data_local_dict,
test_data_local_dict,
class_num,
) = load_partition_data_chexpert(data_dir=data_path, client_number=10, batch_size=10, policy="zeros")
print(train_data_num, test_data_num, class_num)
| [
"torch.zeros",
"torch.from_numpy",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler"
] | 1.13.1 | ray-ruisun/FedML | 24ff30d636bb70f64e94e9ca205375033597d3dd |
1.13 | import numpy as np
import scipy.sparse as sp
import torch
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from torch_geometric.utils import to_networkx, degree
import torch.nn.functional as F
def convert_to_nodeDegreeFeatures(graphs):
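    """Replace each graph's node features with a one-hot encoding of node degree.

    The encoding width is the maximum degree observed over all input graphs plus one,
    so feature dimensions stay consistent across the dataset.
    """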
# print(graph.x)
graph_infos = []
maxdegree = 0
for i, graph in enumerate(graphs):
g = to_networkx(graph, to_undirected=True)
gdegree = max(dict(g.degree).values())
if gdegree > maxdegree:
maxdegree = gdegree
graph_infos.append(
(graph, g.degree, graph.num_nodes)
) # (graph, node_degrees, num_nodes)
new_graphs = []
    for i, info in enumerate(graph_infos):
        idx, x = info[0].edge_index[0], info[0].x
        deg = degree(idx, info[2], dtype=torch.long)
        deg = F.one_hot(deg, num_classes=maxdegree + 1).to(torch.float)
        new_graph = info[0].clone()
new_graph.__setitem__("x", deg)
new_graphs.append(new_graph)
return new_graphs
def split_data(graphs, train=None, test=None, shuffle=True, seed=None):
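    """Split a list of graphs into train and test sets, stratified by graph label."""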
y = torch.cat([graph.y for graph in graphs])
graphs_tv, graphs_test = train_test_split(
graphs,
train_size=train,
test_size=test,
stratify=y,
shuffle=shuffle,
random_state=seed,
)
return graphs_tv, graphs_test
def np_uniform_sample_next(compact_adj, tree, fanout):
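    """Uniformly sample `fanout` neighbors for every node in the last level of the walk tree."""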
last_level = tree[-1] # [batch, f^depth]
batch_lengths = compact_adj.degrees[last_level]
nodes = np.repeat(last_level, fanout, axis=1)
batch_lengths = np.repeat(batch_lengths, fanout, axis=1)
batch_next_neighbor_ids = np.random.uniform(
size=batch_lengths.shape, low=0, high=1 - 1e-9
)
# Shape = (len(nodes), neighbors_per_node)
batch_next_neighbor_ids = np.array(
batch_next_neighbor_ids * batch_lengths, dtype=last_level.dtype
)
shape = batch_next_neighbor_ids.shape
batch_next_neighbor_ids = np.array(
compact_adj.compact_adj[nodes.reshape(-1), batch_next_neighbor_ids.reshape(-1)]
).reshape(shape)
return batch_next_neighbor_ids
def np_traverse(
compact_adj, seed_nodes, fanouts=(1,), sample_fn=np_uniform_sample_next
):
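    """Grow a walk forest from `seed_nodes` by repeatedly sampling neighbors with `sample_fn`.

    Level i+1 has fanouts[i] times as many columns as level i; the returned list contains
    one array per level, starting with the seeds themselves.
    """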
if not isinstance(seed_nodes, np.ndarray):
raise ValueError("Seed must a numpy array")
if (
len(seed_nodes.shape) > 2
or len(seed_nodes.shape) < 1
or not str(seed_nodes.dtype).startswith("int")
):
raise ValueError("seed_nodes must be 1D or 2D int array")
if len(seed_nodes.shape) == 1:
seed_nodes = np.expand_dims(seed_nodes, 1)
# Make walk-tree
forest_array = [seed_nodes]
for f in fanouts:
next_level = sample_fn(compact_adj, forest_array, f)
assert next_level.shape[1] == forest_array[-1].shape[1] * f
forest_array.append(next_level)
return forest_array
class WalkForestCollator(object):
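    """Collate a single molecule graph into (walk forest, normalized feature matrix, label) tensors."""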
def __init__(self, normalize_features=False):
self.normalize_features = normalize_features
def __call__(self, molecule):
comp_adj, feature_matrix, label, fanouts = molecule[0]
node_ids = np.array(list(range(feature_matrix.shape[0])), dtype=np.int32)
forest = np_traverse(comp_adj, node_ids, fanouts)
torch_forest = [torch.from_numpy(forest[0]).flatten()]
label = np.where(np.isnan(label), 0.0, label)
for i in range(len(forest) - 1):
torch_forest.append(torch.from_numpy(forest[i + 1]).reshape(-1, fanouts[i]))
if self.normalize_features:
mx = sp.csr_matrix(feature_matrix)
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
normalized_feature_matrix = r_mat_inv.dot(mx)
normalized_feature_matrix = np.array(normalized_feature_matrix.todense())
else:
scaler = StandardScaler()
scaler.fit(feature_matrix)
normalized_feature_matrix = scaler.transform(feature_matrix)
return (
torch_forest,
torch.as_tensor(normalized_feature_matrix, dtype=torch.float32),
torch.as_tensor(label, dtype=torch.float32),
)
class DefaultCollator(object):
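    """Collate a single molecule into (normalized adjacency, normalized features, label) tensors.

    Adjacency normalization is the symmetric D^-1/2 A D^-1/2 form; features are either
    row-normalized or standardized with a StandardScaler fit on the molecule itself.
    """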
def __init__(self, normalize_features=True, normalize_adj=True):
self.normalize_features = normalize_features
self.normalize_adj = normalize_adj
def __call__(self, molecule):
adj_matrix, feature_matrix, label, _ = molecule[0]
label = np.where(np.isnan(label), 0.0, label)
if self.normalize_features:
mx = sp.csr_matrix(feature_matrix)
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.0
r_mat_inv = sp.diags(r_inv)
normalized_feature_matrix = r_mat_inv.dot(mx)
normalized_feature_matrix = np.array(normalized_feature_matrix.todense())
else:
scaler = StandardScaler()
scaler.fit(feature_matrix)
normalized_feature_matrix = scaler.transform(feature_matrix)
if self.normalize_adj:
rowsum = np.array(adj_matrix.sum(1))
r_inv_sqrt = np.power(rowsum, -0.5).flatten()
r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.0
r_mat_inv_sqrt = sp.diags(r_inv_sqrt)
normalized_adj_matrix = (
adj_matrix.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt)
)
else:
normalized_adj_matrix = adj_matrix
return (
torch.as_tensor(
np.array(normalized_adj_matrix.todense()), dtype=torch.float32
),
torch.as_tensor(normalized_feature_matrix, dtype=torch.float32),
torch.as_tensor(label, dtype=torch.float32),
) | [
"torch.cat",
"torch.nn.functional.one_hot",
"torch.from_numpy",
"torch.as_tensor"
] | 1.13.1 | ray-ruisun/FedML | 24ff30d636bb70f64e94e9ca205375033597d3dd |
1.0 | import copy
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.utils import clones
class LayerNormGoogle(nn.Module):
def __init__(self, features, epsilon=1e-6):
super(LayerNormGoogle, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.epsilon = epsilon
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.epsilon) + self.b_2
class EncoderBlockGoogle(nn.Module):
def __init__(self, layer, num_layers):
super(EncoderBlockGoogle, self).__init__()
self.layers = clones(layer, num_layers)
self.norm = LayerNormGoogle(layer.size)
def forward(self, x, mask):
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class ResidualConnectionGoogle(nn.Module):
def __init__(self, size, keep_prob):
super(ResidualConnectionGoogle, self).__init__()
self.norm = LayerNormGoogle(size)
# TODO: Use dropout interface
self.dropout = nn.Dropout(keep_prob)
def forward(self, input, sublayer):
return input + self.dropout(sublayer(self.norm(input)))
class EncoderLayerGoogle(nn.Module):
def __init__(self, size, attention, feed_forward, keep_prob):
super(EncoderLayerGoogle, self).__init__()
self.size = size
self.attention = attention
self.feed_forward = feed_forward
# Each encoder layer has two sublayers
self.sublayer = clones(ResidualConnectionGoogle(size, keep_prob), 2)
def forward(self, x, mask):
x = self.sublayer[0](x, lambda x: self.attention(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class EncoderClassifier(nn.Module):
def __init__(self, embedding, encoder, classifier, device, is_average=True):
super(EncoderClassifier, self).__init__()
self.embedding = embedding
self.encoder = encoder
self.classifier = classifier
self.device = device
self.is_average = is_average
def forward(self, x, mask=None):
kl_loss = torch.Tensor([0.0])
# Initial x.size() = [length, batch_size]
x = x.permute(1, 0)
# After permute x.size = [batch_size, length]
x = self.embedding(x)
if "cuda" in str(self.device):
x = x.cuda()
kl_loss = kl_loss.cuda()
x = self.encoder(x, mask)
if self.is_average:
# Averaged sentence representation
x = torch.mean(x, dim=1)
x = self.classifier(x)
return x, kl_loss
class Classifier(nn.Module):
def __init__(self, d_model, d_hidden, num_classes, keep_prob):
super(Classifier, self).__init__()
self.linear1 = nn.Linear(d_model, d_hidden)
self.dropout = nn.Dropout(keep_prob)
self.relu = nn.ReLU()
self.linear2 = nn.Linear(d_hidden, num_classes)
def forward(self, x):
x = self.dropout(self.relu(self.linear1(x)))
x = self.linear2(x)
return x
class MultiHeadedAttentionGoogle(nn.Module):
def __init__(self, heads=8, d_model=512, keep_prob=0.1):
super(MultiHeadedAttentionGoogle, self).__init__()
assert d_model % heads == 0
self.d_k = d_model // heads
self.heads = heads
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(keep_prob)
def attention(self, query, key, value, mask=None):
# Dot product attention
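        # scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V, with the mask applied
        # to the scores and dropout applied to the attention weights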
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if self.dropout is not None:
p_attn = self.dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def forward(self, query, key, value, mask=None):
num_batches = query.size(0)
if mask is not None:
mask = mask.unsqueeze(1)
# Apply linear projection on the input sequence and split the heads.
query, key, value = [linear(x).view(num_batches, -1, self.heads, self.d_k).transpose(1, 2)
for linear, x in zip(self.linears, (query, key, value))]
        # Apply attention on the projected and split vectors
x, self.attn = self.attention(query, key, value, mask=mask)
# Concat vectors and apply linear
x = x.transpose(1, 2).contiguous().view(num_batches, -1, self.heads * self.d_k)
return self.linears[-1](x)
class PositionalFeedForwardGoogle(nn.Module):
def __init__(self, d_model, d_ff, keep_prob=0.1):
super(PositionalFeedForwardGoogle, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(keep_prob)
self.relu = nn.ReLU()
def forward(self, input):
return self.w_2(self.dropout(self.relu(self.w_1(input))))
class Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, padding_id, use_pretrained_embed, pretrained_weights,
optional_sqrt_mul=False):
super(Embeddings, self).__init__()
# Initialize embeddings
self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_id).cpu()
        if use_pretrained_embed:
            # nn.Embedding.from_pretrained is a classmethod that returns a new module, so calling it
            # on the instance was a no-op; copy the pretrained weights into this embedding instead.
            self.embedding.weight.data.copy_(pretrained_weights)
self.embed_dim = embed_dim
self.optional_sqrt_mul = optional_sqrt_mul
def forward(self, input):
if self.optional_sqrt_mul:
return self.embedding(input) * math.sqrt(self.embed_dim)
else:
return self.embedding(input)
class PositionalEncodingGoogle(nn.Module):
def __init__(self, d_model, keep_prob=0.1, max_len=5000):
super(PositionalEncodingGoogle, self).__init__()
self.dropout = nn.Dropout(keep_prob)
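        # Sinusoidal encodings: PE(pos, 2i) = sin(pos / 10000^(2i/d_model)) and
        # PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model)); div_term is computed in log space.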
positional_encoding = torch.zeros(max_len, d_model)
pos = torch.arange(0., max_len).unsqueeze(1)
# Log space
div_term = torch.exp(torch.arange(0., d_model, 2) * (-math.log(10000) / d_model))
positional_encoding[:, 0::2] = torch.sin(pos * div_term)
positional_encoding[:, 1::2] = torch.cos(pos * div_term)
positional_encoding = positional_encoding.unsqueeze(0)
self.register_buffer("pe", positional_encoding)
def forward(self, input):
return self.dropout(input + Variable(self.pe[:, :input.size(1)], requires_grad=False))
class TransformerGoogle:
def __init__(self, args):
super(TransformerGoogle, self).__init__()
self.args_common = args["common_model_properties"]
self.args_specific = args["transformer_google"]
# Device
self.device = self.args_common["device"]
# Input/Output dimensions
self.vocab_size = self.args_common["vocab_size"]
self.embed_dim = self.args_common["embed_dim"]
self.num_class = self.args_common["num_class"]
# Embedding parameters
self.padding_id = self.args_common["padding_id"]
# Condition parameters
self.use_pretrained_embed = self.args_common["use_pretrained_embed"]
self.use_embed_sqrt_mul = self.args_specific["use_embed_sqrt_mul"]
# Pretrained embedding weights
self.pretrained_weights = self.args_common["pretrained_weights"]
# Dropout probabilities for each individual part of the full model.
self.keep_prob_encoder = self.args_specific["keep_prob_encoder"]
self.keep_prob_pe = self.args_specific["keep_prob_pe"]
        self.keep_prob_pff = self.args_specific["keep_prob_pff"]
self.keep_prob_attn = self.args_specific["keep_prob_attn"]
self.keep_prob_clf = self.args_specific["keep_prob_clf"]
# Condition parameter for the transformer type (It only supports classification for now)
self.transformer_type = self.args_specific["transformer_type"]
# Number of parallel attention layers for MultiHeadedAttention
self.heads = self.args_specific["heads"]
# Number of encoder layers
self.num_encoder_layers = self.args_specific["num_encoder_layers"]
# Number of hidden count units for Position-Wise Feed-Forward Network
self.num_hidden_pos_ff = self.args_specific["num_hidden_pos_ff"]
# Maximum length of an input
self.max_length = self.args_specific["max_length"]
if self.transformer_type == "classifier":
self.model = self.create_classifier_transformer()
else:
raise ValueError("Transformer can be created as classifier for now!")
def create_classifier_transformer(self):
c = copy.deepcopy
# Initialize individual parts of the full model
# attention = torch.nn.MultiheadAttention(num_heads=self.heads, embed_dim=self.embed_dim,
# dropout=self.keep_prob_attn)
attention = MultiHeadedAttentionGoogle(heads=self.heads, d_model=self.embed_dim, keep_prob=self.keep_prob_attn)
        ff = PositionalFeedForwardGoogle(d_model=self.embed_dim, d_ff=self.num_hidden_pos_ff,
                                         keep_prob=self.keep_prob_pff)
embeddings = Embeddings(self.embed_dim, self.vocab_size, self.padding_id, self.use_pretrained_embed,
self.pretrained_weights, optional_sqrt_mul=self.use_embed_sqrt_mul)
positional_embeddings = PositionalEncodingGoogle(d_model=self.embed_dim, keep_prob=self.keep_prob_pe,
max_len=self.max_length)
# Initialize the full model
model = EncoderClassifier(nn.Sequential(embeddings, c(positional_embeddings)),
EncoderBlockGoogle(
EncoderLayerGoogle(self.embed_dim, c(attention), c(ff), self.keep_prob_encoder),
self.num_encoder_layers),
Classifier(self.embed_dim, d_hidden=self.embed_dim // 2, num_classes=self.num_class,
keep_prob=self.keep_prob_clf),
device=self.device)
# Initialize model parameters
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
if __name__ == '__main__':
print("Transformer tests")
plt.figure(figsize=(15, 5))
pe = PositionalEncodingGoogle(20, 0)
y = pe.forward(Variable(torch.zeros(1, 100, 20)))
plt.plot(np.arange(100), y[0, :, 4:8].data.numpy())
plt.legend(["dim %d" % p for p in [4, 5, 6, 7]])
plt.show()
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.cos",
"torch.nn.Embedding",
"torch.sin",
"torch.arange",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.ones",
"torch.matmul",
"torch.nn.functional.softmax",
"torch.Tensor",
"torch.mean"
] | 1.0.1 | SunYanCN/nlp-experiments-in-pytorch | 5d05a53146dffd707e4d037230656f980d7be05c |
1.7 | import pandas as pd
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from backend.services.toxic_comment_jigsaw.application.ai.model import BERTClassifier
from backend.services.toxic_comment_jigsaw.application.ai.training.src.dataset import BERTDataset
from backend.services.toxic_comment_jigsaw.application.ai.training.src.preprocess import Preprocess
from backend.services.toxic_comment_jigsaw.application.ai.training.src.engine import Engine
from backend.services.toxic_comment_jigsaw.application.ai.settings import Settings
from transformers import AdamW, get_linear_schedule_with_warmup
from torch.utils.data import DataLoader
class Train:
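    """End-to-end fine-tuning pipeline for the Jigsaw toxic comment data: prepares train/validation
    loaders, builds the BERT classifier with AdamW and a linear warmup schedule, and checkpoints
    whenever validation accuracy improves."""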
def __init__(self):
# initialize required class
self.settings = Settings
self.engine = Engine()
self.preprocess = Preprocess()
# initialize required variables
self.bert_classifier = None
self.optimizer = None
self.scheduler = None
self.train_data_loader = None
self.val_data_loader = None
self.total_steps = None
self.best_accuracy = 0
def __initialize(self):
# Instantiate Bert Classifier
self.bert_classifier = BERTClassifier(freeze_bert=False)
self.bert_classifier.to(self.settings.DEVICE)
# Create the optimizer
self.optimizer = AdamW(self.bert_classifier.parameters(),
lr=5e-5, # Default learning rate
eps=1e-8 # Default epsilon value
)
# Set up the learning rate scheduler
self.scheduler = get_linear_schedule_with_warmup(self.optimizer,
num_warmup_steps=0, # Default value
num_training_steps=self.total_steps)
    def create_data_loaders(self, dataset):
pass
def load_data(self):
train_df = pd.read_csv(self.settings.TRAIN_DATA).fillna("none")
train_df['comment_text'] = train_df['comment_text'].apply(lambda x: self.preprocess.clean_text(x))
X = list(train_df['comment_text'])
y = np.array(train_df.loc[:, 'toxic':])
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.20, random_state=self.settings.RANDOM_STATE)
# training dataset
train_dataset = BERTDataset(X_train, y_train)
# validation dataset
val_dataset = BERTDataset(X_val, y_val)
self.train_data_loader = DataLoader(train_dataset,
batch_size=self.settings.TRAIN_BATCH_SIZE,
shuffle=True,
num_workers=self.settings.TRAIN_NUM_WORKERS)
self.val_data_loader = DataLoader(val_dataset,
batch_size=self.settings.VALID_BATCH_SIZE,
shuffle=True,
num_workers=self.settings.VAL_NUM_WORKERS)
self.total_steps = int(len(X_train) / self.settings.TRAIN_BATCH_SIZE * self.settings.EPOCHS)
def train(self):
for epochs in range(self.settings.EPOCHS):
# calling the training function in engine.py file
self.engine.train_fn(data_loader=self.train_data_loader,
model=self.bert_classifier,
optimizer=self.optimizer,
device=self.settings.DEVICE,
schedular=self.scheduler)
# calling the evaluation function from the engine.py file to compute evaluation
val_loss, val_accuracy = self.engine.eval_fn(data_loader=self.val_data_loader,
model=self.bert_classifier,
device=self.settings.DEVICE)
# updating the accuracy
if val_accuracy > self.best_accuracy:
torch.save(self.bert_classifier.state_dict(), self.settings.MODEL_PATH)
self.best_accuracy = val_accuracy
def run(self):
try:
print("Loading and Preparing the Dataset-----!! ")
self.load_data()
print("Dataset Successfully Loaded and Prepared-----!! ")
print()
print("-" * 70)
print("Loading and Initializing the Bert Model -----!! ")
self.__initialize()
print("Model Successfully Loaded and Initialized-----!! ")
print()
print("-" * 70)
print("------------------Starting Training-----------!!")
self.engine.set_seed()
self.train()
print("Training complete-----!!!")
        except Exception as ex:
print("Following Exception Occurred---!! ", str(ex))
| [
"torch.utils.data.DataLoader"
] | 1.7.0 | R-aryan/Jigsaw-Toxic-Comment-Classification | e5e4da7df379ac1b315f2bde655386180f39c517 |
1.9 | #!/usr/bin/env python3
import unittest
import torch
import gpytorch
from gpytorch.test.variational_test_case import VariationalTestCase
class TestUnwhitenedVariationalGP(VariationalTestCase, unittest.TestCase):
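    """Exercises UnwhitenedVariationalStrategy through the shared VariationalTestCase suite,
    additionally asserting that training/eval rely on Cholesky solves rather than CG or CIQ."""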
@property
def batch_shape(self):
return torch.Size([])
@property
def distribution_cls(self):
return gpytorch.variational.CholeskyVariationalDistribution
@property
def mll_cls(self):
return gpytorch.mlls.VariationalELBO
@property
def strategy_cls(self):
return gpytorch.variational.UnwhitenedVariationalStrategy
def test_training_iteration(self, *args, **kwargs):
cg_mock, cholesky_mock, ciq_mock = super().test_training_iteration(*args, **kwargs)
self.assertFalse(cg_mock.called)
self.assertFalse(ciq_mock.called)
if self.distribution_cls == gpytorch.variational.CholeskyVariationalDistribution:
            self.assertEqual(cholesky_mock.call_count, 3)  # One per forward pass, plus one for initialization
else:
self.assertEqual(cholesky_mock.call_count, 2) # One for each forward pass
def test_eval_iteration(self, *args, **kwargs):
cg_mock, cholesky_mock, ciq_mock = super().test_eval_iteration(*args, **kwargs)
self.assertFalse(cg_mock.called)
self.assertFalse(ciq_mock.called)
self.assertEqual(cholesky_mock.call_count, 1) # One to compute cache, that's it!
def test_fantasy_call(self, *args, **kwargs):
# we only want to check CholeskyVariationalDistribution
if self.distribution_cls is gpytorch.variational.CholeskyVariationalDistribution:
return super().test_fantasy_call(*args, **kwargs)
with self.assertRaises(AttributeError):
super().test_fantasy_call(*args, **kwargs)
class TestUnwhitenedPredictiveGP(TestUnwhitenedVariationalGP):
@property
def mll_cls(self):
return gpytorch.mlls.PredictiveLogLikelihood
class TestUnwhitenedRobustVGP(TestUnwhitenedVariationalGP):
@property
def mll_cls(self):
return gpytorch.mlls.GammaRobustVariationalELBO
class TestUnwhitenedMeanFieldVariationalGP(TestUnwhitenedVariationalGP):
@property
def distribution_cls(self):
return gpytorch.variational.MeanFieldVariationalDistribution
class TestUnwhitenedMeanFieldPredictiveGP(TestUnwhitenedPredictiveGP):
@property
def distribution_cls(self):
return gpytorch.variational.MeanFieldVariationalDistribution
class TestUnwhitenedMeanFieldRobustVGP(TestUnwhitenedRobustVGP):
@property
def distribution_cls(self):
return gpytorch.variational.MeanFieldVariationalDistribution
class TestUnwhitenedDeltaVariationalGP(TestUnwhitenedVariationalGP):
@property
def distribution_cls(self):
return gpytorch.variational.DeltaVariationalDistribution
class TestUnwhitenedDeltaPredictiveGP(TestUnwhitenedPredictiveGP):
@property
def distribution_cls(self):
return gpytorch.variational.DeltaVariationalDistribution
class TestUnwhitenedDeltaRobustVGP(TestUnwhitenedRobustVGP):
@property
def distribution_cls(self):
return gpytorch.variational.DeltaVariationalDistribution
if __name__ == "__main__":
unittest.main()
| [
"torch.Size"
] | 1.9 | jrg365/gpytorch | 52bf07a3a3c55a570b22ff2bf3825adf4a6e259d |
1.4 | import time
import torch
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
if __name__ == '__main__':
opt = TrainOptions().parse() # get training options
dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset) # get the number of images in the dataset.
model = create_model(opt) # create a model given opt.model and other options
print('The number of training images = %d' % dataset_size)
visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
opt.visualizer = visualizer
total_iters = 0 # the total number of training iterations
optimize_time = 0.1
times = []
for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
epoch_start_time = time.time() # timer for entire epoch
iter_data_time = time.time() # timer for data loading per iteration
epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
dataset.set_epoch(epoch)
for i, data in enumerate(dataset): # inner loop within one epoch
iter_start_time = time.time() # timer for computation per iteration
if total_iters % opt.print_freq == 0:
t_data = iter_start_time - iter_data_time
batch_size = data["A"].size(0)
total_iters += batch_size
epoch_iter += batch_size
torch.cuda.synchronize()
optimize_start_time = time.time()
model.set_input(data) # unpack data from dataset and apply preprocessing
if epoch == opt.epoch_count and i == 0:
model.data_dependent_initialize()
model.setup(opt) # regular setup: load and print networks; create schedulers
model.parallelize()
model.optimize_parameters() # calculate loss functions, get gradients, update network weights
torch.cuda.synchronize()
optimize_time = (time.time() - optimize_start_time) / batch_size * 0.005 + 0.995 * optimize_time
if total_iters % opt.display_freq == 0: # display images on visdom and save images to a HTML file
save_result = total_iters % opt.update_html_freq == 0
model.compute_visuals()
visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
losses = model.get_current_losses()
visualizer.print_current_losses(epoch, epoch_iter, losses, optimize_time, t_data)
if opt.display_id is None or opt.display_id > 0:
visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
if total_iters % opt.save_latest_freq == 0: # cache our latest model every <save_latest_freq> iterations
print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
print(opt.name) # it's useful to occasionally show the experiment name on console
save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
model.save_networks(save_suffix)
iter_data_time = time.time()
if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
model.save_networks('latest')
# model.save_networks(epoch)
print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
model.update_learning_rate() # update learning rates at the end of every epoch.
| [
"torch.cuda.synchronize"
] | 1.4.0 | sumanyumuku98/contrastive-unpaired-translation | 91738727123252e39c4e23f75f93cad737c0d718 |
1.3 | import torch
from .optimizer import Optimizer
class Adagrad(Optimizer):
"""Implements Adagrad algorithm.
It has been proposed in `Adaptive Subgradient Methods for Online Learning
and Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
lr_decay (float, optional): learning rate decay (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-10)
.. _Adaptive Subgradient Methods for Online Learning and Stochastic
Optimization: http://jmlr.org/papers/v12/duchi11a.html
"""
def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= lr_decay:
raise ValueError("Invalid lr_decay value: {}".format(lr_decay))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if not 0.0 <= initial_accumulator_value:
raise ValueError("Invalid initial_accumulator_value value: {}".format(initial_accumulator_value))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
defaults = dict(lr=lr, lr_decay=lr_decay, eps=eps, weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value)
super(Adagrad, self).__init__(params, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['sum'] = torch.full_like(p.data, initial_accumulator_value)
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['sum'].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state['step'] += 1
if group['weight_decay'] != 0:
if p.grad.data.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad = grad.add(group['weight_decay'], p.data)
clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
if grad.is_sparse:
grad = grad.coalesce() # the update is non-linear so indices must be unique
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
state['sum'].add_(make_sparse(grad_values.pow(2)))
std = state['sum'].sparse_mask(grad)
std_values = std._values().sqrt_().add_(group['eps'])
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
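                    # dense update: accumulate squared gradients, then scale the step
                    # by 1 / (sqrt(accumulated squares) + eps)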
state['sum'].addcmul_(1, grad, grad)
std = state['sum'].sqrt().add_(group['eps'])
p.data.addcdiv_(-clr, grad, std)
return loss
| [
"torch.full_like"
] | 1.3.1 | countBMB/BenjiRepo | 79d882263baaf2a11654ca67d2e5593074d36dfa |
1.3 | from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import torch
import sys
import unittest
from scipy import interpolate
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, utils
from caffe2.proto import caffe2_pb2
import caffe2.python.operator_test.detectron_keypoints as keypoint_utils
NUM_TEST_ROI = 14
NUM_KEYPOINTS = 19
HEATMAP_SIZE = 56
def heatmap_FAIR_keypoint_ref(maps, rois):
return [keypoint_utils.heatmaps_to_keypoints(maps, rois)]
def heatmap_approx_keypoint_ref(maps, rois):
return [keypoint_utils.approx_heatmap_keypoint(maps, rois)]
def c10_op_ref(maps, rois):
keypoints = torch.ops._caffe2.HeatmapMaxKeypoint(
torch.Tensor(maps),
torch.Tensor(rois),
should_output_softmax=True,
)
return [keypoints.numpy()]
class TestHeatmapMaxKeypointOp(hu.HypothesisTestCase):
def setUp(self):
super(TestHeatmapMaxKeypointOp, self).setUp()
np.random.seed(0)
# initial coordinates and interpolate HEATMAP_SIZE from it
HEATMAP_SMALL_SIZE = 4
bboxes_in = 500 * np.random.rand(NUM_TEST_ROI, 4).astype(np.float32)
        # ensure each bbox stores the smaller coordinates first (x1 <= x2, y1 <= y2)
for i in range(NUM_TEST_ROI):
if bboxes_in[i][0] > bboxes_in[i][2]:
tmp = bboxes_in[i][2]
bboxes_in[i][2] = bboxes_in[i][0]
bboxes_in[i][0] = tmp
if bboxes_in[i][1] > bboxes_in[i][3]:
tmp = bboxes_in[i][3]
bboxes_in[i][3] = bboxes_in[i][1]
bboxes_in[i][1] = tmp
        # initialize randomized values for the heatmaps and expand them with interpolation
init = np.random.rand(
NUM_TEST_ROI,
NUM_KEYPOINTS,
HEATMAP_SMALL_SIZE,
HEATMAP_SMALL_SIZE).astype(np.float32)
heatmaps_in = np.zeros(
(NUM_TEST_ROI, NUM_KEYPOINTS, HEATMAP_SIZE, HEATMAP_SIZE)
).astype(np.float32)
for roi in range(NUM_TEST_ROI):
for keyp in range(NUM_KEYPOINTS):
f = interpolate.interp2d(
np.arange(0, 1, 1.0 / HEATMAP_SMALL_SIZE),
np.arange(0, 1, 1.0 / HEATMAP_SMALL_SIZE),
init[roi][keyp],
kind='cubic')
heatmaps_in[roi][keyp] = f(
np.arange(0, 1, 1.0 / HEATMAP_SIZE),
np.arange(0, 1, 1.0 / HEATMAP_SIZE))
self.heatmaps_in = heatmaps_in
self.bboxes_in = bboxes_in
self.op = core.CreateOperator(
'HeatmapMaxKeypoint',
['heatmaps_in', 'bboxes_in'],
['keypoints_out'],
arg=[
utils.MakeArgument("should_output_softmax", True),
],
device_option=caffe2_pb2.DeviceOption())
@unittest.skipIf('cv2' not in sys.modules, 'python-opencv is not installed')
def test_close_to_FAIR(self):
# 10 pixel error in scale of 500px bbox
self.assertReferenceChecks(
device_option=caffe2_pb2.DeviceOption(),
op=self.op,
inputs=[self.heatmaps_in, self.bboxes_in],
reference=heatmap_FAIR_keypoint_ref,
threshold=10,
)
def test_approx_heatmap_keypoint(self):
# C++/Python implementation should be bit-wise equal
self.assertReferenceChecks(
device_option=caffe2_pb2.DeviceOption(),
op=self.op,
inputs=[self.heatmaps_in, self.bboxes_in],
reference=heatmap_approx_keypoint_ref,
)
def test_special_cases(self):
example_bboxes = np.array([[0, 0, 100, 100]]).astype(np.float32)
heatmap_tests = []
# special case #1
heatmap_tests.append(np.array([
[0.14722, 0.807823, 0.447052],
[0.652919, 0.850923, -0.225462],
[0.805912, 0.75778, -0.563371],
]).astype(np.float32).reshape((1, 1, 3, 3)))
# special case #2
heatmap_tests.append(np.array([
[3.19541, 3.69551, 3.87579],
[3.63094, 3.89978, 3.67606],
[3.78555, 3.87291, 3.28083],
]).astype(np.float32).reshape((1, 1, 3, 3)))
for heatmap_test in heatmap_tests:
self.assertReferenceChecks(
device_option=caffe2_pb2.DeviceOption(),
op=self.op,
inputs=[heatmap_test, example_bboxes],
reference=heatmap_approx_keypoint_ref,
)
def test_caffe2_pytorch_eq(self):
self.assertReferenceChecks(
device_option=caffe2_pb2.DeviceOption(),
op=self.op,
inputs=[self.heatmaps_in, self.bboxes_in],
reference=c10_op_ref,
)
if __name__ == "__main__":
unittest.main()
| [
"torch.Tensor"
] | 1.3.1 | countBMB/BenjiRepo | 79d882263baaf2a11654ca67d2e5593074d36dfa |
1.8 | from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import polyak_update
from stable_baselines3.sac.policies import SACPolicy
class SAC(OffPolicyAlgorithm):
"""
Soft Actor-Critic (SAC)
Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,
This implementation borrows code from original implementation (https://github.com/haarnoja/sac)
from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo
(https://github.com/rail-berkeley/softlearning/)
and from Stable Baselines (https://github.com/hill-a/stable-baselines)
Paper: https://arxiv.org/abs/1801.01290
Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html
Note: we use double q target and not value target as discussed
in https://github.com/hill-a/stable-baselines/issues/270
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1)
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
like ``(5, "step")`` or ``(2, "episode")``.
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param action_noise: the action noise type (None by default), this can help
for hard exploration problem. Cf common.noise for the different action noise type.
:param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
If ``None``, it will be automatically selected.
:param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param ent_coef: Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
:param target_update_interval: update the target network every ``target_network_update_freq``
gradient steps.
:param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling
during the warm up phase (before learning starts)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[SACPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
buffer_size: int = 1_000_000, # 1e6
learning_starts: int = 100,
batch_size: int = 256,
tau: float = 0.005,
gamma: float = 0.99,
train_freq: Union[int, Tuple[int, str]] = 1,
gradient_steps: int = 1,
action_noise: Optional[ActionNoise] = None,
replay_buffer_class: Optional[ReplayBuffer] = None,
replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
optimize_memory_usage: bool = False,
ent_coef: Union[str, float] = "auto",
target_update_interval: int = 1,
target_entropy: Union[str, float] = "auto",
use_sde: bool = False,
sde_sample_freq: int = -1,
use_sde_at_warmup: bool = False,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(SAC, self).__init__(
policy,
env,
SACPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
action_noise,
replay_buffer_class=replay_buffer_class,
replay_buffer_kwargs=replay_buffer_kwargs,
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
use_sde_at_warmup=use_sde_at_warmup,
optimize_memory_usage=optimize_memory_usage,
supported_action_spaces=(gym.spaces.Box),
support_multi_env=True,
)
self.target_entropy = target_entropy
self.log_ent_coef = None # type: Optional[th.Tensor]
# Entropy coefficient / Entropy temperature
# Inverse of the reward scale
self.ent_coef = ent_coef
self.target_update_interval = target_update_interval
self.ent_coef_optimizer = None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(SAC, self)._setup_model()
self._create_aliases()
# Target entropy is used when learning the entropy coefficient
if self.target_entropy == "auto":
# automatically set target entropy if needed
self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)
else:
# Force conversion
# this will also throw an error for unexpected string
self.target_entropy = float(self.target_entropy)
# The entropy coefficient or entropy can be learned automatically
# see Automating Entropy Adjustment for Maximum Entropy RL section
# of https://arxiv.org/abs/1812.05905
if isinstance(self.ent_coef, str) and self.ent_coef.startswith("auto"):
# Default initial value of ent_coef when learned
init_value = 1.0
if "_" in self.ent_coef:
init_value = float(self.ent_coef.split("_")[1])
assert init_value > 0.0, "The initial value of ent_coef must be greater than 0"
# Note: we optimize the log of the entropy coeff which is slightly different from the paper
# as discussed in https://github.com/rail-berkeley/softlearning/issues/37
self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True)
self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1))
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(self.device)
def _create_aliases(self) -> None:
self.actor = self.policy.actor
self.critic = self.policy.critic
self.critic_target = self.policy.critic_target
def train(self, gradient_steps: int, batch_size: int = 64) -> None:
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
# Update optimizers learning rate
optimizers = [self.actor.optimizer, self.critic.optimizer]
if self.ent_coef_optimizer is not None:
optimizers += [self.ent_coef_optimizer]
# Update learning rate according to lr schedule
self._update_learning_rate(optimizers)
ent_coef_losses, ent_coefs = [], []
actor_losses, critic_losses = [], []
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
# We need to sample because `log_std` may have changed between two gradient steps
if self.use_sde:
self.actor.reset_noise()
# Action by the current actor for the sampled state
actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations)
log_prob = log_prob.reshape(-1, 1)
ent_coef_loss = None
if self.ent_coef_optimizer is not None:
# Important: detach the variable from the graph
# so we don't change it with other losses
# see https://github.com/rail-berkeley/softlearning/issues/60
ent_coef = th.exp(self.log_ent_coef.detach())
ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean()
ent_coef_losses.append(ent_coef_loss.item())
else:
ent_coef = self.ent_coef_tensor
ent_coefs.append(ent_coef.item())
# Optimize entropy coefficient, also called
# entropy temperature or alpha in the paper
if ent_coef_loss is not None:
self.ent_coef_optimizer.zero_grad()
ent_coef_loss.backward()
self.ent_coef_optimizer.step()
with th.no_grad():
# Select action according to policy
next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations)
# Compute the next Q values: min over all critics targets
next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1)
next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)
# add entropy term
next_q_values = next_q_values - ent_coef * next_log_prob.reshape(-1, 1)
# td error + entropy term
target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
# Get current Q-values estimates for each critic network
# using action from the replay buffer
current_q_values = self.critic(replay_data.observations, replay_data.actions)
# Compute critic loss
critic_loss = 0.5 * sum([F.mse_loss(current_q, target_q_values) for current_q in current_q_values])
critic_losses.append(critic_loss.item())
# Optimize the critic
self.critic.optimizer.zero_grad()
critic_loss.backward()
self.critic.optimizer.step()
# Compute actor loss
# Alternative: actor_loss = th.mean(log_prob - qf1_pi)
# Mean over all critic networks
q_values_pi = th.cat(self.critic.forward(replay_data.observations, actions_pi), dim=1)
min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True)
actor_loss = (ent_coef * log_prob - min_qf_pi).mean()
actor_losses.append(actor_loss.item())
# Optimize the actor
self.actor.optimizer.zero_grad()
actor_loss.backward()
self.actor.optimizer.step()
# Update target networks
if gradient_step % self.target_update_interval == 0:
polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
self._n_updates += gradient_steps
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/ent_coef", np.mean(ent_coefs))
self.logger.record("train/actor_loss", np.mean(actor_losses))
self.logger.record("train/critic_loss", np.mean(critic_losses))
if len(ent_coef_losses) > 0:
self.logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "SAC",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(SAC, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(SAC, self)._excluded_save_params() + ["actor", "critic", "critic_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
if self.ent_coef_optimizer is not None:
saved_pytorch_variables = ["log_ent_coef"]
state_dicts.append("ent_coef_optimizer")
else:
saved_pytorch_variables = ["ent_coef_tensor"]
return state_dicts, saved_pytorch_variables
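# Illustrative usage sketch: a minimal training run, assuming a registered
# continuous-action Gym env id such as "Pendulum-v1" is available.
#
#     model = SAC("MlpPolicy", "Pendulum-v1", verbose=1)
#     model.learn(total_timesteps=10_000)
#     model.save("sac_pendulum")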
| [
"torch.min",
"torch.no_grad",
"torch.ones",
"torch.nn.functional.mse_loss"
] | 1.8.1 | squalidux/stable-baselines3 | 72690b3ed0635c68f037b3dc121bd9987a6e82a8 |
1.8 | import os
from copy import deepcopy
import numpy as np
import pytest
import torch as th
from gym import spaces
from stable_baselines3 import A2C, DQN, PPO, SAC, TD3
from stable_baselines3.common.envs import FakeImageEnv
from stable_baselines3.common.preprocessing import is_image_space, is_image_space_channels_first
from stable_baselines3.common.utils import zip_strict
from stable_baselines3.common.vec_env import DummyVecEnv, VecFrameStack, VecTransposeImage, is_vecenv_wrapped
@pytest.mark.parametrize("model_class", [A2C, PPO, SAC, TD3, DQN])
def test_cnn(tmp_path, model_class):
SAVE_NAME = "cnn_model.zip"
# Fake grayscale with frameskip
# Atari after preprocessing: 84x84x1, here we are using lower resolution
# to check that the network handle it automatically
env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=1, discrete=model_class not in {SAC, TD3})
if model_class in {A2C, PPO}:
kwargs = dict(n_steps=64)
else:
# Avoid memory error when using replay buffer
# Reduce the size of the features
kwargs = dict(
buffer_size=250,
policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)),
seed=1,
)
model = model_class("CnnPolicy", env, **kwargs).learn(250)
# FakeImageEnv is channel last by default and should be wrapped
assert is_vecenv_wrapped(model.get_env(), VecTransposeImage)
obs = env.reset()
# Test stochastic predict with channel last input
if model_class == DQN:
model.exploration_rate = 0.9
for _ in range(10):
model.predict(obs, deterministic=False)
action, _ = model.predict(obs, deterministic=True)
model.save(tmp_path / SAVE_NAME)
del model
model = model_class.load(tmp_path / SAVE_NAME)
# Check that the prediction is the same
assert np.allclose(action, model.predict(obs, deterministic=True)[0])
os.remove(str(tmp_path / SAVE_NAME))
@pytest.mark.parametrize("model_class", [A2C])
def test_vec_transpose_skip(tmp_path, model_class):
# Fake grayscale with frameskip
env = FakeImageEnv(
screen_height=41, screen_width=40, n_channels=10, discrete=model_class not in {SAC, TD3}, channel_first=True
)
env = DummyVecEnv([lambda: env])
# Stack 5 frames so the observation is now (50, 40, 40) but the env is still channel first
env = VecFrameStack(env, 5, channels_order="first")
obs_shape_before = env.reset().shape
# The observation space should be different as the heuristic thinks it is channel last
assert not np.allclose(obs_shape_before, VecTransposeImage(env).reset().shape)
env = VecTransposeImage(env, skip=True)
# The observation space should be the same as we skip the VecTransposeImage
assert np.allclose(obs_shape_before, env.reset().shape)
kwargs = dict(
n_steps=64,
policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)),
seed=1,
)
model = model_class("CnnPolicy", env, **kwargs).learn(250)
obs = env.reset()
action, _ = model.predict(obs, deterministic=True)
def patch_dqn_names_(model):
# Small hack to make the test work with DQN
if isinstance(model, DQN):
model.critic = model.q_net
model.critic_target = model.q_net_target
def params_should_match(params, other_params):
for param, other_param in zip_strict(params, other_params):
assert th.allclose(param, other_param)
def params_should_differ(params, other_params):
for param, other_param in zip_strict(params, other_params):
assert not th.allclose(param, other_param)
def check_td3_feature_extractor_match(model):
for (key, actor_param), critic_param in zip(model.actor_target.named_parameters(), model.critic_target.parameters()):
if "features_extractor" in key:
assert th.allclose(actor_param, critic_param), key
def check_td3_feature_extractor_differ(model):
for (key, actor_param), critic_param in zip(model.actor_target.named_parameters(), model.critic_target.parameters()):
if "features_extractor" in key:
assert not th.allclose(actor_param, critic_param), key
@pytest.mark.parametrize("model_class", [SAC, TD3, DQN])
@pytest.mark.parametrize("share_features_extractor", [True, False])
def test_features_extractor_target_net(model_class, share_features_extractor):
if model_class == DQN and share_features_extractor:
pytest.skip()
env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=1, discrete=model_class not in {SAC, TD3})
# Avoid memory error when using replay buffer
# Reduce the size of the features
kwargs = dict(buffer_size=250, learning_starts=100, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)))
if model_class != DQN:
kwargs["policy_kwargs"]["share_features_extractor"] = share_features_extractor
# No delay for TD3 (changes when the actor and polyak update take place)
if model_class == TD3:
kwargs["policy_delay"] = 1
model = model_class("CnnPolicy", env, seed=0, **kwargs)
patch_dqn_names_(model)
if share_features_extractor:
# Check that the objects are the same and not just copied
assert id(model.policy.actor.features_extractor) == id(model.policy.critic.features_extractor)
if model_class == TD3:
assert id(model.policy.actor_target.features_extractor) == id(model.policy.critic_target.features_extractor)
# Actor and critic feature extractor should be the same
td3_features_extractor_check = check_td3_feature_extractor_match
else:
        # Actor and critic feature extractors should differ
        td3_features_extractor_check = check_td3_feature_extractor_differ
    # Check that the objects differ
if model_class != DQN:
assert id(model.policy.actor.features_extractor) != id(model.policy.critic.features_extractor)
if model_class == TD3:
assert id(model.policy.actor_target.features_extractor) != id(model.policy.critic_target.features_extractor)
    # Critic and target should be equal at the beginning of training
params_should_match(model.critic.parameters(), model.critic_target.parameters())
# TD3 has also a target actor net
if model_class == TD3:
params_should_match(model.actor.parameters(), model.actor_target.parameters())
model.learn(200)
# Critic and target should differ
params_should_differ(model.critic.parameters(), model.critic_target.parameters())
if model_class == TD3:
params_should_differ(model.actor.parameters(), model.actor_target.parameters())
td3_features_extractor_check(model)
# Re-initialize and collect some random data (without doing gradient steps,
# since 10 < learning_starts = 100)
model = model_class("CnnPolicy", env, seed=0, **kwargs).learn(10)
patch_dqn_names_(model)
original_param = deepcopy(list(model.critic.parameters()))
original_target_param = deepcopy(list(model.critic_target.parameters()))
if model_class == TD3:
original_actor_target_param = deepcopy(list(model.actor_target.parameters()))
# Deactivate copy to target
model.tau = 0.0
model.train(gradient_steps=1)
# Target should be the same
params_should_match(original_target_param, model.critic_target.parameters())
if model_class == TD3:
params_should_match(original_actor_target_param, model.actor_target.parameters())
td3_features_extractor_check(model)
# not the same for critic net (updated by gradient descent)
params_should_differ(original_param, model.critic.parameters())
# Update the reference as it should not change in the next step
original_param = deepcopy(list(model.critic.parameters()))
if model_class == TD3:
original_actor_param = deepcopy(list(model.actor.parameters()))
# Deactivate learning rate
model.lr_schedule = lambda _: 0.0
# Re-activate polyak update
model.tau = 0.01
# Special case for DQN: target net is updated in the `collect_rollouts()`
# not the `train()` method
if model_class == DQN:
model.target_update_interval = 1
model._on_step()
model.train(gradient_steps=1)
# Target should have changed now (due to polyak update)
params_should_differ(original_target_param, model.critic_target.parameters())
# Critic should be the same
params_should_match(original_param, model.critic.parameters())
if model_class == TD3:
params_should_differ(original_actor_target_param, model.actor_target.parameters())
params_should_match(original_actor_param, model.actor.parameters())
td3_features_extractor_check(model)
def test_channel_first_env(tmp_path):
# test_cnn uses environment with HxWxC setup that is transposed, but we
# also want to work with CxHxW envs directly without transposing wrapper.
SAVE_NAME = "cnn_model.zip"
# Create environment with transposed images (CxHxW).
# If underlying CNN processes the data in wrong format,
# it will raise an error of negative dimension sizes while creating convolutions
env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=1, discrete=True, channel_first=True)
model = A2C("CnnPolicy", env, n_steps=100).learn(250)
assert not is_vecenv_wrapped(model.get_env(), VecTransposeImage)
obs = env.reset()
action, _ = model.predict(obs, deterministic=True)
model.save(tmp_path / SAVE_NAME)
del model
model = A2C.load(tmp_path / SAVE_NAME)
# Check that the prediction is the same
assert np.allclose(action, model.predict(obs, deterministic=True)[0])
os.remove(str(tmp_path / SAVE_NAME))
def test_image_space_checks():
not_image_space = spaces.Box(0, 1, shape=(10,))
assert not is_image_space(not_image_space)
# Not uint8
not_image_space = spaces.Box(0, 255, shape=(10, 10, 3))
assert not is_image_space(not_image_space)
# Not correct shape
not_image_space = spaces.Box(0, 255, shape=(10, 10), dtype=np.uint8)
assert not is_image_space(not_image_space)
# Not correct low/high
not_image_space = spaces.Box(0, 10, shape=(10, 10, 3), dtype=np.uint8)
assert not is_image_space(not_image_space)
# Not correct space
not_image_space = spaces.Discrete(n=10)
assert not is_image_space(not_image_space)
an_image_space = spaces.Box(0, 255, shape=(10, 10, 3), dtype=np.uint8)
assert is_image_space(an_image_space, check_channels=False)
assert is_image_space(an_image_space, check_channels=True)
channel_first_image_space = spaces.Box(0, 255, shape=(3, 10, 10), dtype=np.uint8)
assert is_image_space(channel_first_image_space, check_channels=False)
assert is_image_space(channel_first_image_space, check_channels=True)
an_image_space_with_odd_channels = spaces.Box(0, 255, shape=(10, 10, 5), dtype=np.uint8)
assert is_image_space(an_image_space_with_odd_channels)
# Should not pass if we check if channels are valid for an image
assert not is_image_space(an_image_space_with_odd_channels, check_channels=True)
# Test if channel-check works
channel_first_space = spaces.Box(0, 255, shape=(3, 10, 10), dtype=np.uint8)
assert is_image_space_channels_first(channel_first_space)
channel_last_space = spaces.Box(0, 255, shape=(10, 10, 3), dtype=np.uint8)
assert not is_image_space_channels_first(channel_last_space)
channel_mid_space = spaces.Box(0, 255, shape=(10, 3, 10), dtype=np.uint8)
# Should raise a warning
with pytest.warns(Warning):
assert not is_image_space_channels_first(channel_mid_space)
| [
"torch.allclose"
] | 1.8.1 | squalidux/stable-baselines3 | 72690b3ed0635c68f037b3dc121bd9987a6e82a8 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from copy import copy, deepcopy
import numpy as np
import torch
import torch.distributed as torch_distrib
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.step_result import EvalResult, Result
from pytorch_lightning.trainer.states import TrainerState
from pytorch_lightning.trainer.supporters import TensorRunningAccum, Accumulator
from pytorch_lightning.utilities import parsing, AMPType
from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.model_utils import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.warning_utils import WarningCache
class TrainLoop:
def __init__(self, trainer):
self.trainer = trainer
self.early_stopping_accumulator = None
self.checkpoint_accumulator = None
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self.automatic_optimization = True
self._curr_step_result = None
self._cur_grad_norm_dict = None
def on_trainer_init(
self, max_epochs, min_epochs, max_steps, min_steps, num_sanity_val_steps, automatic_optimization
):
self.trainer.global_step = 0
self.trainer.current_epoch = 0
self.trainer.interrupted = False
self.trainer.should_stop = False
self.trainer._state = TrainerState.INITIALIZING
self.trainer.total_batch_idx = 0
self.trainer.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
self.automatic_optimization = automatic_optimization
self.trainer.max_epochs = max_epochs
self.trainer.min_epochs = min_epochs
self.trainer.max_steps = max_steps
self.trainer.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
def should_skip_training(self):
if self.trainer.current_epoch >= self.trainer.max_epochs:
return True
if self.trainer.limit_train_batches == 0:
return True
return False
def on_train_start(self):
# clear cache before training
if self.trainer.on_gpu and self.trainer.root_gpu is not None:
# use context because of:
# https://discuss.pytorch.org/t/out-of-memory-when-i-use-torch-cuda-empty-cache/57898
with torch.cuda.device(f"cuda:{self.trainer.root_gpu}"):
torch.cuda.empty_cache()
# hook
self.trainer.call_hook("on_train_start")
def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):
# bind logger and other properties
self.trainer.model_connector.copy_trainer_model_properties(model)
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# links data to the trainer
self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)
# check that model is configured correctly
self.trainer.config_validator.verify_loop_configurations(model)
def setup_training(self, model: LightningModule):
"""Sanity check a few things before starting actual training.
Args:
model: The model to run sanity test on.
"""
# --------------------------
# Setup??
# --------------------------
ref_model = model
if self.trainer.data_parallel:
ref_model = model.module
# set the ranks and devices
self.trainer.accelerator_backend.dist.rank = self.trainer.global_rank
self.trainer.accelerator_backend.dist.device = ref_model.device
# give model convenience properties
ref_model.trainer = self.trainer
# set local properties on the model
self.trainer.model_connector.copy_trainer_model_properties(ref_model)
# init amp. Must be done here instead of __init__ to allow ddp to work
if self.trainer.amp_backend == AMPType.NATIVE and self.trainer.precision == 16 and not self.trainer.use_tpu:
self.trainer.scaler = torch.cuda.amp.GradScaler()
# log hyper-parameters
if self.trainer.logger is not None:
# save exp to get started (this is where the first experiment logs are written)
self.trainer.logger.log_hyperparams(ref_model.hparams_initial)
self.trainer.logger.log_graph(ref_model)
self.trainer.logger.save()
# wait for all to join if on distributed
self.trainer.accelerator_backend.barrier("setup_training")
# register auto-resubmit when on SLURM
self.trainer.slurm_connector.register_slurm_signal_handlers()
# --------------------------
# Pre-train
# --------------------------
# on pretrain routine start
self.trainer.on_pretrain_routine_start(ref_model)
if self.trainer.is_function_implemented("on_pretrain_routine_start"):
ref_model.on_pretrain_routine_start()
# print model summary
if self.trainer.is_global_zero and self.trainer.weights_summary is not None and not self.trainer.testing:
if self.trainer.weights_summary in ModelSummary.MODES:
ref_model.summarize(mode=self.trainer.weights_summary)
else:
raise MisconfigurationException("weights_summary can be None, " + ", ".join(ModelSummary.MODES))
# track model now.
# if cluster resets state, the model will update with the saved weights
self.trainer.model = model
# restore training and model before hpc is called
self.trainer.checkpoint_connector.restore_weights(model)
# on pretrain routine end
self.trainer.on_pretrain_routine_end(ref_model)
if self.trainer.is_function_implemented("on_pretrain_routine_end"):
ref_model.on_pretrain_routine_end()
def on_train_end(self):
if self._teardown_already_run:
return
self._teardown_already_run = True
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.trainer.global_step -= 1
self.check_checkpoint_callback(should_save=True, is_last=True)
self.trainer.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
# kill loggers
if self.trainer.logger is not None:
self.trainer.logger.finalize("success")
# summarize profile results
if self.trainer.global_rank == 0:
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator_backend.on_train_end()
# clear mem
if self.trainer.on_gpu:
model = self.trainer.get_model()
model.cpu()
torch.cuda.empty_cache()
def check_checkpoint_callback(self, should_save, is_last=False):
# TODO bake this logic into the checkpoint callback
if should_save and self.trainer.checkpoint_connector.has_trained:
checkpoint_callbacks = [c for c in self.trainer.callbacks if isinstance(c, ModelCheckpoint)]
if is_last and any(c.save_last for c in checkpoint_callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.get_model()
[c.on_validation_end(self.trainer, model) for c in checkpoint_callbacks]
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.trainer.current_epoch = epoch
model = self.trainer.get_model()
# reset train dataloader
if self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# set seed for distributed sampler (enables shuffling for each epoch)
try:
self.trainer.train_dataloader.sampler.set_epoch(epoch)
except Exception:
pass
# changing gradient according accumulation_scheduler
self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# structured result accumulators for callbacks
self.early_stopping_accumulator = Accumulator()
self.checkpoint_accumulator = Accumulator()
# hook
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, epoch_end_outputs, batch, batch_idx, dataloader_idx):
# hook
self.trainer.call_hook('on_batch_end')
self.trainer.call_hook('on_train_batch_end', epoch_end_outputs, batch, batch_idx, dataloader_idx)
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, epoch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model):
if not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, epoch_end_outputs):
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(epoch_end_outputs):
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def get_optimizers_iterable(self):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
optimizers_loop_length = optimizer_freq_cumsum[-1]
current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length
# find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
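# Worked example (illustrative note, not part of the original source): with
# optimizer_frequencies=[2, 1] the cumulative sums are [2, 3], so the loop length
# is 3; total_batch_idx % 3 in {0, 1} selects optimizer 0 and a remainder of 2
# selects optimizer 1, i.e. two steps of the first optimizer per step of the second.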
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
is_result_obj = isinstance(training_step_output, Result)
if is_result_obj:
training_step_output.detach()
else:
training_step_output.batch_loss = training_step_output.batch_loss.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("model_forward"):
args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
training_step_output = self.trainer.accelerator_backend.training_step(args)
self.trainer.logger_connector.cache_logged_metrics()
self._check_training_step_output(training_step_output)
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
training_step_output, split_batch
)
is_result_obj = isinstance(training_step_output, Result)
if training_step_output_for_epoch_end is None:
return None
# enable empty loss when using manual opt
closure_loss = None
untouched_loss = None
if self.trainer.train_loop.automatic_optimization:
# accumulate loss
# (if accumulate_grad_batches = 1 no effect)
if is_result_obj:
closure_loss = training_step_output.minimize
else:
closure_loss = training_step_output.batch_loss
closure_loss = closure_loss / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
untouched_loss = closure_loss.detach().clone()
# result
result = AttributeDict(
closure_loss=closure_loss,
loss=untouched_loss,
training_step_output=training_step_output,
training_step_output_for_epoch_end=training_step_output_for_epoch_end,
hiddens=training_step_output.hiddens,
)
return result
def _process_training_step_output(self, training_step_output, split_batch):
training_step_output_for_epoch_end = training_step_output
# enable validation_step return None
if training_step_output_for_epoch_end is None:
return None, None
# -----------------------------------------
# process result return (DEPRECATE in 1.0)
# -----------------------------------------
if isinstance(training_step_output, Result):
training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
return training_step_output_for_epoch_end, training_step_output
# -----------------------------------------
# process hybrid (1.0)
# -----------------------------------------
# no need for these checks in 1.0.0
# TODO: remove checks in 1.0.0
is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)
is_1_0_output = is_tensor or ("log" not in training_step_output and "progress_bar" not in training_step_output)
if is_1_0_output:
return self._process_training_step_output_1_0(training_step_output, split_batch)
# -----------------------------------------
# process old dict (deprecate 1.0)
# -----------------------------------------
training_step_output = self.trainer.process_dict_result(training_step_output, train=True)
training_step_output = AttributeDict(
batch_loss=training_step_output[0],
pbar_on_batch_end=training_step_output[1],
log_metrics=training_step_output[2],
callback_metrics=training_step_output[3],
hiddens=training_step_output[4],
)
# if the user decides to finally reduce things in epoch_end, save raw output without graphs
if isinstance(training_step_output_for_epoch_end, torch.Tensor):
training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
else:
training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)
return training_step_output_for_epoch_end, training_step_output
def _process_training_step_output_1_0(self, training_step_output, split_batch):
result = self.trainer.get_model()._results
loss = None
hiddens = None
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
result["extra"] = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
result["extra"] = {}
# map to results under the hood
result.minimize = loss
result.hiddens = hiddens
# track batch for manual reduction with result
result.track_batch_size(len(split_batch))
# track metrics without grads for epoch reduction
training_step_output_for_epoch_end = copy(result)
training_step_output_for_epoch_end.detach()
if self.trainer.move_metrics_to_cpu:
training_step_output_for_epoch_end.cpu()
# what flows back into the system
training_step_output = result
return training_step_output_for_epoch_end, training_step_output
def _process_result(self, training_step_output, split_batch):
training_step_output.track_batch_size(len(split_batch))
m = """
TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.
Use self.log and .write from the LightningModule to log metrics and write predictions.
training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
Option 1:
return loss
Option 2:
return {'loss': loss, 'anything_else': ...}
Option 3:
return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
"""
rank_zero_warn(m)
# don't allow EvalResult in the training_step
if isinstance(training_step_output, EvalResult):
raise MisconfigurationException(
"training_step cannot return EvalResult, " "use a dict or TrainResult instead"
)
training_step_output_for_epoch_end = copy(training_step_output)
training_step_output_for_epoch_end.detach()
return training_step_output_for_epoch_end
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
with self.trainer.profiler.profile("optimizer_step"):
# optimizer step lightningModule hook
self.trainer.accelerator_backend.optimizer_step(
optimizer, batch_idx, opt_idx, train_step_and_backward_closure
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator_backend.optimizer_zero_grad(batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator_backend.clip_gradients(optimizer)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.get_model()
grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)
return grad_norm_dict
def process_hiddens(self, opt_closure_result):
hiddens = opt_closure_result.hiddens
if isinstance(opt_closure_result.training_step_output, Result):
opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()
return hiddens
def tbptt_split_batch(self, batch):
splits = [batch]
if self.trainer.truncated_bptt_steps is not None:
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)
return splits
def run_training_epoch(self):
# get model
model = self.trainer.get_model()
# modify dataloader if needed (ddp, etc...)
train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)
# track epoch output
epoch_output = [[] for _ in range(self.num_optimizers)]
# enable profiling for the dataloader
train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
dataloader_idx = 0
should_check_val = False
for batch_idx, (batch, is_last_batch) in train_dataloader:
self.trainer.batch_idx = batch_idx
# ------------------------------------
# TRAINING_STEP + TRAINING_STEP_END
# ------------------------------------
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
break
# only track outputs when user implements training_epoch_end
# otherwise we will build up unnecessary memory
epoch_end_outputs = self.process_train_step_outputs(
batch_output.training_step_output_for_epoch_end,
self.early_stopping_accumulator,
self.checkpoint_accumulator,
)
# hook
# TODO: add outputs to batches
self.on_train_batch_end(epoch_output, epoch_end_outputs, batch, batch_idx, dataloader_idx)
# -----------------------------------------
# SAVE METRICS TO LOGGERS
# -----------------------------------------
self.trainer.logger_connector.log_train_step_metrics(batch_output)
# -----------------------------------------
# VALIDATE IF NEEDED + CHECKPOINT CALLBACK
# -----------------------------------------
should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)
if should_check_val:
self.trainer.run_evaluation(test_mode=False)
# reset stage to train
self.trainer.logger_connector.set_stage("train")
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self.save_loggers_on_train_batch_end()
# update LR schedulers
monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
self.trainer.checkpoint_connector.has_trained = True
# max steps reached, end training
if self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1:
accumulation_done = self._accumulated_batches_reached()
# Ensure accumulation across batches has completed before breaking loop
if accumulation_done:
break
# end epoch early
# stop when the flag is changed or we've gone past the amount
# requested in the batches
if self.trainer.should_stop:
break
self.trainer.total_batch_idx += 1
# stop epoch if we limited the number of training batches
if (batch_idx + 1) >= self.trainer.num_training_batches:
break
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
# epoch end hook
self.run_on_epoch_end_hook(epoch_output)
# log epoch metrics
self.trainer.logger_connector.log_train_epoch_end_metrics(
epoch_output,
self.checkpoint_accumulator,
self.early_stopping_accumulator,
self.num_optimizers
)
# when no val loop is present or fast-dev-run still need to call checkpoints
self.check_checkpoint_callback(not (should_check_val or is_overridden('validation_step', model)))
# increment the global step once
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
def run_training_batch(self, batch, batch_idx, dataloader_idx):
# track grad norms
grad_norm_dic = {}
# bookkeeping
using_results_obj = False
self.trainer.hiddens = None
# track all outputs across time and num of optimizers
batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]
if batch is None:
return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_batch_start")
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# lightning module hook
splits = self.tbptt_split_batch(batch)
for split_idx, split_batch in enumerate(splits):
# create an iterable for optimizers and loop over them
for opt_idx, optimizer in self.prepare_optimizers():
# toggle model params + set info to logger_connector
self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
if self.should_accumulate():
# For gradient accumulation
# -------------------
# calculate loss (train step + train step end)
# -------------------
# perform ddp sync only when performing optimizer_step
with self.block_ddp_sync_behaviour():
self.training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens)
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# ------------------------------
# BACKWARD PASS
# ------------------------------
# gradient update with accumulated gradients
else:
if self.automatic_optimization:
def train_step_and_backward_closure():
result = self.training_step_and_backward(
split_batch,
batch_idx,
opt_idx,
optimizer,
self.trainer.hiddens
)
return None if result is None else result.loss
# optimizer step
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
else:
self._curr_step_result = self.training_step(
split_batch,
batch_idx,
opt_idx,
self.trainer.hiddens
)
if self._curr_step_result is None:
# user decided to skip optimization
# make sure to zero grad.
self.zero_grad_handler(batch_idx, optimizer, opt_idx)
continue
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# todo: Properly aggregate grad_norm across opt_idx and split_idx
grad_norm_dic = self._cur_grad_norm_dict
self._cur_grad_norm_dict = None
# hook + clear gradients
self.zero_grad_handler(batch_idx, optimizer, opt_idx)
# update running loss + reset accumulated loss
self.update_running_loss()
result = AttributeDict(
signal=0,
grad_norm_dic=grad_norm_dic,
training_step_output_for_epoch_end=batch_outputs,
)
return result
@contextmanager
def block_ddp_sync_behaviour(self):
if isinstance(self.trainer.model, torch.nn.parallel.DistributedDataParallel):
yield self.trainer.model.no_sync()
else:
yield
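# Note (explanatory comment added here, based on the documented behaviour of
# DistributedDataParallel.no_sync): the context manager suppresses the gradient
# all-reduce for backward passes executed inside it, so accumulation steps stay
# local and only the backward that precedes the optimizer step synchronizes.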
def _process_closure_result(
self, batch_outputs: list, opt_idx: int
) -> list:
opt_closure_result = self._curr_step_result
if opt_closure_result is not None:
# cache metrics
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
# track hiddens
self.trainer.hiddens = self.process_hiddens(opt_closure_result)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(opt_closure_result.loss)
# track all the outputs across all steps
batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
if self.automatic_optimization:
# track total loss for logging (avoid mem leaks)
self.accumulated_loss.append(opt_closure_result.loss)
self._curr_step_result = None
return batch_outputs
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
# lightning module hook
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
self._curr_step_result = result
if result is None:
self.warning_cache.warn("training_step returned None if it was on purpose, ignore this warning...")
return None
if self.trainer.train_loop.automatic_optimization:
# backward pass
with self.trainer.profiler.profile("model_backward"):
self.backward(result, optimizer, opt_idx)
# hook - call this hook only
# when gradients have finished to accumulate
if not self.should_accumulate():
self.on_after_backward(result.training_step_output, batch_idx, result.loss)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(result.loss)
return result
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
self.trainer.dev_debugger.track_event("backward_call")
# backward can be called manually in the training loop
if isinstance(result, torch.Tensor):
self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, *args, **kwargs)
else:
result.closure_loss = self.trainer.accelerator_backend.backward(
result.closure_loss, optimizer, opt_idx, *args, **kwargs
)
if not self.should_accumulate():
# track gradients
self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(interval="step", monitor_metrics=monitor_metrics)
def run_on_epoch_end_hook(self, epoch_output):
self.trainer.call_hook('on_epoch_end')
self.trainer.call_hook('on_train_epoch_end', epoch_output)
self.trainer.logger_connector.on_train_epoch_end()
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.trainer.global_step += 1
def _accumulated_batches_reached(self):
return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self):
return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
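# Worked example (illustrative note, not part of the original source): with
# accumulate_grad_batches=4, batch_idx 0..2 give (batch_idx + 1) % 4 != 0, so
# should_accumulate() is True and only backward/accumulation runs; at batch_idx 3,
# or on the final batch of the epoch, it returns False and the optimizer steps.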
def should_check_val_fx(self, batch_idx, is_last_batch):
# decide if we should run validation
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
can_check_val = self.trainer.enable_validation and is_val_check_epoch
should_check_val = is_val_check_batch or self.trainer.should_stop
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
should_check_val = can_check_val and (should_check_val or is_last_batch_for_infinite_dataset)
return should_check_val
def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
# allow training_step to omit the opt_idx argument when only one optimizer is configured
args = [batch, batch_idx]
if len(self.trainer.optimizers) > 1:
if self.trainer.has_arg("training_step", "optimizer_idx"):
args.append(opt_idx)
else:
num_opts = len(self.trainer.optimizers)
raise ValueError(
f"Your LightningModule defines {num_opts} optimizers but "
f'training_step is missing the "optimizer_idx" argument.'
)
# pass hiddens if using tbptt
if self.trainer.truncated_bptt_steps is not None:
args.append(hiddens)
return args
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs or self.trainer.fast_dev_run:
if self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):
"""
Figure out what needs to be tracked/logged at the end of the epoch
"""
# the training step outputs a list per optimizer. The list contains the outputs at each time step
# when no TBPTT is used, then the list has 1 item per batch
# when TBPTT IS used, then the list has n items (1 per time step)
epoch_end_outputs = []
for optimizer_idx_outputs in all_train_step_outputs:
# extract one representative sample from each time step (1 if no tbptt) and 0th optimizer
if len(optimizer_idx_outputs) == 0:
continue
sample_output = optimizer_idx_outputs[-1]
# pull out callback info if available (ie: Results object)
if isinstance(sample_output, dict) and "early_stop_on" in sample_output:
early_stopping_accumulator.accumulate(sample_output["early_stop_on"])
if isinstance(sample_output, dict) and "checkpoint_on" in sample_output:
checkpoint_accumulator.accumulate(sample_output["checkpoint_on"])
# decide if we need to reduce at the end of the epoch automatically
auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
# only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
if is_overridden("training_epoch_end", model=self.trainer.get_model()) or auto_reduce_tng_result:
epoch_end_outputs.append(optimizer_idx_outputs)
return epoch_end_outputs
def prepare_optimizers(self):
# in manual optimization we loop over all optimizers at once
optimizers = self.get_optimizers_iterable()
if not self.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.get_model()
model.toggle_optimizer(optimizer, opt_idx)
# use to track metrics internally
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self):
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
def zero_grad_handler(self, batch_idx, optimizer, opt_idx):
if self.automatic_optimization:
# hook
self.on_before_zero_grad(optimizer)
optimizers = enumerate([optimizer])
else:
optimizers = self.get_optimizers_iterable()
for idx, optimizer in optimizers:
self.optimizer_zero_grad(batch_idx, optimizer, opt_idx)
| [
"torch.cuda.device",
"torch.cuda.empty_cache",
"torch.cuda.amp.GradScaler"
] | 1.3 | songwanguw/pytorch-lightning | 64da9c9d87ac1c106d94310c4d90668fbafbb2cf |
1.10 | """Source code for distributed attentional actor architecture (DA3) model.
Author: Yoshinari Motokawa <[email protected]>
"""
from typing import List
import torch
from core.utils.logging import initialize_logging
from omegaconf import DictConfig
from torch import nn
from ..hard_shrink_attention import HardShrinkBlock
from ..vit import Block, PatchEmbed
logger = initialize_logging(__name__)
class DA3(nn.Module):
def __init__(self, config: DictConfig, input_shape: List[int], output_size: int):
super().__init__()
patched_size_x = input_shape[1] // config.model.patch_size
patched_size_y = input_shape[2] // config.model.patch_size
self.view_method = config.observation_area_mask
self.patch_embed = PatchEmbed(
patch_size=config.model.patch_size,
in_chans=input_shape[0],
embed_dim=config.model.embed_dim,
)
self.saliency_vector = nn.Parameter(torch.zeros(1, 1, config.model.embed_dim))
self.pos_embed = nn.Parameter(
torch.zeros(1, patched_size_x * patched_size_y + 1, config.model.embed_dim)
)
block = HardShrinkBlock if config.model.attention == "hard" else Block
self.blocks = nn.ModuleList(
[
block(
dim=config.model.embed_dim,
num_heads=config.model.num_heads,
mlp_ratio=config.model.mlp_ratio,
**{"af_lambd": config.model.af_lambd}
)
for _ in range(config.model.block_loop)
]
)
self.norm = nn.LayerNorm(config.model.embed_dim)
self.head = nn.Linear(config.model.embed_dim, output_size)
def forward(self, state):
x = self.state_encoder(state)
out = self.patch_embed(x)
saliency_vector = self.saliency_vector.expand(out.shape[0], -1, -1)
out = torch.cat((saliency_vector, out), dim=1)
out = out + self.pos_embed
for blk in self.blocks:
out = blk(out)
out = self.norm(out)
out = out[:, 0]
out = self.head(out)
return out
def forward_attn(self, state):
x = self.state_encoder(state)
out = self.patch_embed(x)
saliency_vector = self.saliency_vector.expand(out.shape[0], -1, -1)
out = torch.cat((saliency_vector, out), dim=1)
out = out + self.pos_embed
attns = list()
for blk in self.blocks:
out, attn = blk.forward_attn(out)
attns.append(attn.detach())
out = self.norm(out)
out = out[:, 0]
out = self.head(out)
return out, [attns]
def state_encoder(self, state):
return state[self.view_method]
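# Illustrative usage sketch (not part of the original file). The config schema is
# assumed only from the attributes accessed above (patch_size, embed_dim, num_heads,
# mlp_ratio, af_lambd, attention, block_loop) plus `observation_area_mask`:
#
#     from omegaconf import OmegaConf
#     cfg = OmegaConf.create({
#         "observation_area_mask": "local",
#         "model": {"patch_size": 4, "embed_dim": 64, "num_heads": 4,
#                   "mlp_ratio": 4.0, "af_lambd": 0.5, "attention": "hard",
#                   "block_loop": 2},
#     })
#     model = DA3(cfg, input_shape=[3, 20, 20], output_size=5)
#     q_values = model({"local": torch.zeros(1, 3, 20, 20)})  # -> shape (1, 5)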
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.LayerNorm",
"torch.zeros"
] | 1.10.0 | Yoshi-0921/MAEXP | cc03fdd46db9b1838df8f7782b4bd1b2bb3f11d5 |
1.3 | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is the class for cyclesr trainworker."""
import datetime
import logging
import itertools
import os
import time
import numpy as np
import torch
import json
from tensorboardX import SummaryWriter
import vega
from vega.datasets import Adapter
from vega.datasets.common.dataset import Dataset
from vega.common import FileOps
from vega.report import ReportClient
from vega.common import ClassFactory, ClassType
from vega.networks.network_desc import NetworkDesc
from vega.trainer.callbacks import Callback
from .utils import AverageMeter
from .utils import TensorNorm
try:
import horovod.torch as hvd
except Exception:
# logging.warning("horovod not been installed, {}".format(str(e)))
pass
# data-processing module
from .utils import find_best_PSNR
@ClassFactory.register(ClassType.CALLBACK)
class CyclesrTrainerCallback(Callback):
"""A special callback for Trainer."""
disable_callbacks = ["ModelStatistics", "MetricsEvaluator", "ModelCheckpoint", "PerformanceSaver",
"LearningRateScheduler", "ProgressLogger", "ReportCallback", "ModelBuilder"]
def __init__(self):
"""Initialize method."""
super(CyclesrTrainerCallback, self).__init__()
def set_trainer(self, trainer):
"""Set trainer object for current callback."""
self.trainer = trainer
self.trainer._train_loop = self._train_loop
self.cfg = self.trainer.config
self._worker_id = self.trainer._worker_id
self.worker_path = self.trainer.get_local_worker_path()
self.output_path = self.trainer.local_output_path
self.best_model_name = "model_best"
self.best_model_file = FileOps.join_path(
self.worker_path, "model_{}.pth".format(self.trainer.worker_id))
def _init_dataloader(self, mode):
"""Decode train dataset and validation dataset.
:return: train dataset and validataion dataset
:rtype: tuple of torch.utils.data.Dataset
"""
dataset = Dataset(mode=mode)
if self.cfg.distributed:
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=hvd.size(), rank=hvd.rank())
dataset.sampler = sampler
return dataset
def _init_model(self):
"""Initialize the model architecture for full train step.
:return: train model
:rtype: class
"""
logging.info('Initializing model')
if self.cfg.model_desc:
logging.debug("model_desc: {}".format(self.cfg.model_desc))
_file = FileOps.join_path(self.worker_path, "model_desc_{}.json".format(self._worker_id))
with open(_file, "w") as f:
json.dump(self.cfg.model_desc, f)
if self.cfg.distributed:
hvd.join()
model_desc = self.cfg.model_desc
net_desc = NetworkDesc(model_desc)
model = net_desc.to_model()
return model
else:
return None
def batch_psnr(self, HR, SR):
"""Calculate the mean psnr in a batch.
:param HR: HR image
:type HR: torch FloatTensor
:param SR: SR image
:type SR: torch FloatTensor
:return: mean psnr in a batch
:rtype: Float
"""
psnr = 20 * torch.log10(1 / torch.sqrt(torch.mean((HR - SR) ** 2, [1, 2, 3])))
psnr = psnr.mean().item()
return psnr
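# Note (illustrative, not part of the original source): this assumes images are
# scaled to [0, 1], so the peak signal value is 1 and the expression reduces to
# PSNR = 20 * log10(1 / RMSE) = -10 * log10(MSE), computed per image over
# (C, H, W) and then averaged over the batch.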
def _train(self, trainloader, writer, epoch, model, print_freq=10):
"""Train process.
:param trainloader: train dataset
:type trainloader: torch.utils.data.DataLoader
:param writer: record event files to log dir
:type writer: tensorboardX.SummaryWriter
:param epoch: current epoch
:type epoch: int
:param model: cyclesr model with train mode
:type model: CycleSRModel class(nn.Module)
:param print_freq: frequency of showing training results on console
:type print_freq: int
"""
loss_sr = AverageMeter()
loss_ga = AverageMeter()
loss_cycA = AverageMeter()
PSNRes = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
num_batches = len(trainloader)
for batch_idx, data in enumerate(trainloader):
model.set_mode('train')
step = epoch * num_batches + batch_idx
data_time.update(time.time() - end)
#######################################################################
model.optimize_CycleSR(data, epoch)
# calculate psnr during training
losses = model.get_current_losses()
for name, loss in losses.items():
writer.add_scalar("loss" + name, loss, step) # store the loss in tensorboardX
batchsize = data['X'].size(0)
loss_sr.update(losses['SR'], batchsize)
loss_ga.update(losses['G'], batchsize)
loss_cycA.update(losses['rec_X'], batchsize)
# logging.info("HR: {}. SR: {}".format(model.HR.data))
if epoch < 6:
psnr = self.batch_psnr(model.HR.data, model.G_SR.data)
else:
psnr = self.batch_psnr(model.HR.data, model.SR.data)
PSNRes.update(psnr, batchsize)
writer.add_scalar("training_psnr", psnr, step) # store the psnr
batch_time.update(time.time() - end)
# print result
if (batch_idx + 1) % print_freq == 0:
if not vega.is_gpu_device() or (vega.is_gpu_device() and self.trainer.is_chief):
logging.info('[epoch {0},iter {1}/{2}]\t'
'Time {batch_time.val:.3f}({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f}({data_time.avg:.3f})\t'
'SR MSE {mse.val:.5f}({mse.avg:.5f})\t'
'psnr {psnr.val:.3f}({psnr.avg:.3f})\t'
'G_A {loss_ga.val:.5f}({loss_ga.avg:.5f})\t'
'Cycle_A {loss_cycA.val:.5f}({loss_cycA.avg:.5f})'
.format(epoch, batch_idx + 1, num_batches, batch_time=batch_time, data_time=data_time,
mse=loss_sr, psnr=PSNRes, loss_ga=loss_ga, loss_cycA=loss_cycA))
end = time.time()
def getValImg(self, dataset, val_num=5):
"""Get val_num images for showing outputs of cycleGAN during training.
:param dataset: valid dataset
:type dataset: torch.utils.data.Dataset
:param val_num: number of selected images, default: 5
:type val_num: int
:return: list of selected valid images
:rtype: list
"""
val_imgs = []
for i in range(val_num):
img = dataset[(i * (len(dataset) - 1)) // 5]
img["X"] = torch.unsqueeze(img['X'], 0)
img['Y'] = torch.unsqueeze(img['Y'], 0)
img['HR'] = torch.unsqueeze(img['HR'], 0)
val_imgs.append(img)
return val_imgs
def _evalGAN(self, model, imgs, epoch, writer):
"""Save images to event file.
:param model: cyclesr model
:type model: CycleSRModel class(nn.Module)
:param imgs: list of selected valid images
:type imgs: list
:param epoch: current epoch
:type epoch: int
:param writer: record event files to log dir
:type writer: tensorboardX.SummaryWriter
"""
model.set_mode('eval')
with torch.no_grad():
for i, img in enumerate(imgs):
if vega.is_npu_device():
real_X = img['X'].npu()
real_Y = img['Y'].npu()
HR = img['HR'].npu()
else:
real_X = img['X'].cuda()
real_Y = img['Y'].cuda()
HR = img['HR'].cuda()
fake_Y = model.netG(real_X) # G(X)
rec_X = model.netF(fake_Y) # F(G(X))
fake_X = model.netF(real_Y) # F(Y)
rec_Y = model.netG(fake_X) # G(F(Y))
G_SR = model.netSR(fake_Y) # SR(G(X))
writer.add_image("G_SR" + str(i), TensorNorm((G_SR[0])), epoch)
writer.add_image("HR" + str(i), TensorNorm((HR[0])), epoch)
writer.add_image("Real_bicubic" + str(i), TensorNorm((real_X[0])), epoch)
writer.add_image("Fake_unknown" + str(i), TensorNorm((fake_Y[0])), epoch)
writer.add_image("Real_unknown" + str(i), TensorNorm((real_Y[0])), epoch)
writer.add_image("Fake_bicubic" + str(i), TensorNorm((fake_X[0])), epoch)
writer.add_image("Rec_bicubic" + str(i), TensorNorm((rec_X[0])), epoch)
writer.add_image("Rec_unknown" + str(i), TensorNorm((rec_Y[0])), epoch)
def _valid(self, model, val_dataloader, epoch, eval_epoch, writer, ps_offset=10, val_sr_num=20):
"""Validate process of cyclesr.
:param model: cyclesr model
:type model: CycleSRModel class(nn.Module)
:param val_dataloader: validate dataset
:type val_dataloader: torch.utils.data.DataLoader
:param epoch: current epoch
:type epoch: int
:param eval_epoch: frequency of evaluation
:type eval_epoch: int
:param writer: record event files to log dir
:type writer: tensorboardX.SummaryWriter
:param ps_offset: pixel offset when calculating psnr during evaluation, default: 10
:type ps_offset: int
:param val_sr_num: number of selected images for testing sr model
:type val_sr_num: int
:return: mean psnr of whole validation images or None
:rtype: int or None
"""
SRnet = model.netSR
SRnet.eval()
val_PSNR = []
with torch.no_grad():
for i, data in enumerate(val_dataloader):
val_LR = data['Y']
if "HR" in data.keys():
HR = data['HR']
else:
HR = None
if vega.is_npu_device():
SR = SRnet(val_LR.npu())
else:
SR = SRnet(val_LR.cuda())
SR = torch.clamp(SR, 0.0, 1.0)
if i < val_sr_num:
if i == 0:
logging.info('Saving real LR test images to tensorboard......')
writer.add_image("Val_SR" + str(i), TensorNorm((SR)), epoch)
if epoch == eval_epoch:
writer.add_image('Val_LR' + str(i), TensorNorm((val_LR)), epoch)
if HR is not None:
writer.add_image('Val_HR' + str(i), TensorNorm((HR)), epoch)
if i == val_sr_num - 1:
logging.info('***** Save Done! *****')
else:
if HR is None:
return None
if vega.is_npu_device():
val_PSNR.append(find_best_PSNR(HR.npu(), SR, ps_offset) if HR is not None else None)
else:
val_PSNR.append(find_best_PSNR(HR.cuda(), SR, ps_offset) if HR is not None else None)
if all(val_PSNR):
ave_PSNR = np.asarray(val_PSNR).mean()
else:
ave_PSNR = None
return ave_PSNR
def _train_loop(self):
"""Whole train and validate process for the fully train cyclesr."""
self._init_report()
if not vega.is_cpu_device():
self.trainer._init_setting()
self.model = self._init_model()
if self.cfg.distributed:
self._horovod_init_optimizer()
self._init_horovod_setting()
self.train_data = self._init_dataloader('train')
self.valid_data = self._init_dataloader('test')
train_dataloader = Adapter(self.train_data).loader
valid_dataloader = Adapter(self.valid_data).loader
writer = SummaryWriter(self.worker_path)
start_time = time.time()
train_time = 0
best_psnr = -np.inf
best_epoch = 0
logging.info("==> Start training")
val_gan_imgs = self.getValImg(self.train_data, val_num=5)
for epoch in range(self.cfg.epoch_count, self.cfg.n_epoch + self.cfg.n_epoch_decay + 1):
self.model.update_learning_rate(
epoch,
self.cfg.model_desc.custom.cyc_lr,
self.cfg.model_desc.custom.SR_lr,
self.cfg.n_epoch,
self.cfg.n_epoch_decay)
start_train_time = time.time()
self._train(train_dataloader, writer, epoch, self.model, print_freq=self.cfg.print_freq)
train_time += round(time.time() - start_train_time)
# validation
###############################################################################
if epoch % self.cfg.eval_epoch == 0:
logging.info("==> Validng")
self._evalGAN(self.model, val_gan_imgs, epoch, writer)
val_ave_psnr = self._valid(self.model, valid_dataloader, epoch, self.cfg.eval_epoch, writer,
self.cfg.val_ps_offset)
if val_ave_psnr is not None:
logging.info("==> Current ave psnr is {:.3f}".format(val_ave_psnr))
if val_ave_psnr > best_psnr:
best_psnr = val_ave_psnr
best_epoch = epoch
logging.info(
"==> Best PSNR on val dataset {:.3f}, achieved at epoch {}".format(best_psnr, best_epoch))
self._save_checkpoint(epoch, best=True)
self._update_report(epoch, {"psnr": val_ave_psnr})
model_name = 'epoch' + str(epoch)
logging.info("Saving checkpoints to {}".format(model_name))
self._save_checkpoint(epoch)
elapsed = round(time.time() - start_time)
elapsed = str(datetime.timedelta(seconds=elapsed))
train_time = str(datetime.timedelta(seconds=train_time))
logging.info("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time))
def _save_checkpoint(self, epoch, best=False):
"""Save model weights.
:param epoch: current epoch
:type epoch: int
"""
save_dir = os.path.join(self.worker_path, str(epoch))
FileOps.make_dir(save_dir)
for name in self.model.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = FileOps.join_path(save_dir, save_filename)
net = getattr(self.model, 'net' + name)
best_file = FileOps.join_path(
self.worker_path,
"model_{}.pth".format(name))
if vega.is_gpu_device() and torch.cuda.is_available():
# torch.save(net.module.cpu().state_dict(), save_path)
torch.save(net.module.state_dict(), save_path)
# net.cuda()
if best:
torch.save(net.module.state_dict(), best_file)
elif vega.is_npu_device():
torch.save(net.state_dict(), save_path)
if best:
torch.save(net.state_dict(), best_file)
else:
torch.save(net.cpu().state_dict(), save_path)
if best:
torch.save(net.cpu().state_dict(), best_file)
def _init_horovod_setting(self):
"""Init horovod setting."""
self.is_chief = True
# SR
hvd.broadcast_parameters(self.model.netSR.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(self.model.optimizer_SR, root_rank=0)
# G F
hvd.broadcast_parameters(self.model.netG.state_dict(), root_rank=0)
hvd.broadcast_parameters(self.model.netF.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(self.model.optimizer_G, root_rank=0)
# D_X
hvd.broadcast_parameters(self.model.netD_X.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(self.model.optimizer_D_X, root_rank=0)
# D_Y
hvd.broadcast_parameters(self.model.netD_Y.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(self.model.optimizer_D_Y, root_rank=0)
if hvd.rank() != 0:
self.is_chief = False
else:
self.is_chief = True
def _horovod_init_optimizer(self):
# SR optimizer
self.model.optimizer_SR = hvd.DistributedOptimizer(
self.model.optimizer_SR,
named_parameters=self.model.netSR.named_parameters(),
compression=hvd.Compression.none
)
# G optimizer
self.model.optimizer_G = hvd.DistributedOptimizer(
self.model.optimizer_G,
named_parameters=itertools.chain(self.model.netG.named_parameters(), self.model.netF.named_parameters()),
compression=hvd.Compression.none
)
# D_X optimizer
self.model.optimizer_D_X = hvd.DistributedOptimizer(
self.model.optimizer_D_X,
named_parameters=self.model.netD_X.named_parameters(),
compression=hvd.Compression.none
)
# D_Y optimizer
self.model.optimizer_D_Y = hvd.DistributedOptimizer(
self.model.optimizer_D_Y,
named_parameters=self.model.netD_Y.named_parameters(),
compression=hvd.Compression.none
)
def _init_report(self):
record = ReportClient().update(
worker_id=self.trainer.worker_id,
desc=self.cfg.model_desc,
step_name=self.trainer.step_name,
weights_file=self.best_model_file)
logging.debug("update record=%s", str(record))
def _update_report(self, epoch, performance):
record = ReportClient().update(
self.trainer.step_name,
self.trainer.worker_id,
performance=performance)
logging.debug("report_callback record: {}".format(record))
| [
"torch.no_grad",
"torch.clamp",
"torch.unsqueeze",
"torch.cuda.is_available",
"torch.mean"
] | 1.3.0 | jie311/vega | 1bba6100ead802697e691403b951e6652a99ccae |
1.3 | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for Cutout."""
import numpy as np
import torch
from .ops import int_parameter
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.TRANSFORM)
class Cutout(object):
"""Cutout for an image."""
def __init__(self, length):
"""Construct the Cutout class."""
self.length = int_parameter(length, 20)
def __call__(self, img):
"""Cutout for an image.
:param img: An image
:type img: Tensor
"""
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
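# Illustrative usage sketch (not part of the original file); note that `length` is
# first mapped through int_parameter(length, 20) before being used as the square
# side, and that ToTensor() must come first because __call__ reads the spatial size
# from img.size(1) and img.size(2) of a CxHxW tensor:
#
#     from torchvision import transforms
#     train_transform = transforms.Compose([transforms.ToTensor(), Cutout(length=8)])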
| [
"torch.from_numpy"
] | 1.3.0 | jie311/vega | 1bba6100ead802697e691403b951e6652a99ccae |
1.1 | import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from sklearn.utils import class_weight
from utils.lovasz_losses import lovasz_softmax
import pdb
def make_one_hot(labels, classes):
one_hot = torch.FloatTensor(labels.size()[0], classes, labels.size()[2], labels.size()[3]).zero_().to(labels.device)
target = one_hot.scatter_(1, labels.data, 1)
return target
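# Note (illustrative, not part of the original source): `labels` is expected to be a
# LongTensor of class indices with shape (N, 1, H, W); the result is a one-hot
# FloatTensor of shape (N, classes, H, W) created on the same device as `labels`.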
def get_weights(target):
t_np = target.view(-1).data.cpu().numpy()
classes, counts = np.unique(t_np, return_counts=True)
cls_w = np.median(counts) / counts
#cls_w = class_weight.compute_class_weight('balanced', classes, t_np)
weights = np.ones(7)
weights[classes] = cls_w
return torch.from_numpy(weights).float().cuda()
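# Note (illustrative, not part of the original source): this is median-frequency
# style class weighting (median of the per-class pixel counts divided by each
# class count); the hard-coded np.ones(7) assumes a dataset with exactly 7 classes.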
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, ignore_index=255, reduction='mean'):
super(CrossEntropyLoss2d, self).__init__()
self.CE = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)
def forward(self, output, target):
loss = self.CE(output, target)
return loss
class DiceLoss(nn.Module):
def __init__(self, smooth=1., ignore_index=255):
super(DiceLoss, self).__init__()
self.ignore_index = ignore_index
self.smooth = smooth
def forward(self, output, target):
if self.ignore_index not in range(target.min(), target.max()):
if (target == self.ignore_index).sum() > 0:
target[target == self.ignore_index] = target.min()
target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])
output = F.softmax(output, dim=1)
output_flat = output.contiguous().view(-1)
target_flat = target.contiguous().view(-1)
intersection = (output_flat * target_flat).sum()
loss = 1 - ((2. * intersection + self.smooth) /
(output_flat.sum() + target_flat.sum() + self.smooth))
return loss
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=None, ignore_index=255, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.size_average = size_average
self.CE_loss = nn.CrossEntropyLoss(reduction='none', ignore_index=ignore_index, weight=alpha)  # per-element losses (replaces deprecated reduce=False)
def forward(self, output, target):
logpt = self.CE_loss(output, target)
pt = torch.exp(-logpt)
loss = ((1-pt)**self.gamma) * logpt
if self.size_average:
return loss.mean()
return loss.sum()
class CE_DiceLoss(nn.Module):
def __init__(self, smooth=1, reduction='mean', ignore_index=255, weight=None):
super(CE_DiceLoss, self).__init__()
self.smooth = smooth
self.dice = DiceLoss()
self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)
def forward(self, output, target):
CE_loss = self.cross_entropy(output, target)
dice_loss = self.dice(output, target)
return CE_loss + dice_loss
class LovaszSoftmax(nn.Module):
def __init__(self, classes='present', per_image=False, ignore_index=255):
super(LovaszSoftmax, self).__init__()
self.classes = classes
self.per_image = per_image
self.ignore_index = ignore_index
def forward(self, output, target):
logits = F.softmax(output, dim=1)
loss = lovasz_softmax(logits, target, classes=self.classes, per_image=self.per_image, ignore=self.ignore_index)  # assumes the standard lovasz_softmax(probas, labels, classes, per_image, ignore) signature
return loss
| [
"torch.from_numpy",
"torch.nn.functional.softmax",
"torch.exp",
"torch.nn.CrossEntropyLoss"
] | 1.1.0 | 87003697/Segmentation | 5973a64768632fc52c55f9ffc9f0b43746699b37 |
1.10 | import pandas as pd
import torch
from transformers import BertJapaneseTokenizer
from wtfml.data_loaders.nlp.utils import clean_sentence
import transformers
class BERTSimpleDataset:
"""
Dataset for BERT which can accept a cleaning function
"""
def __init__(self, input_texts, target, clearning_function=clean_sentence):
if isinstance(input_texts, pd.Series):
input_texts = list(input_texts)
self.input_texts = input_texts
self.target = target
self.tokenizer = BertJapaneseTokenizer.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking"
)
self.max_len = 144 # twitter
self.clearning_function = clearning_function
def __len__(self):
return len(self.input_texts)
def __getitem__(self, item):
input_text = str(self.input_texts[item])
if self.clearning_function:
input_text = self.clearning_function(input_text)
inputs = self.tokenizer.encode_plus(
input_text,
None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
# return_tensors="pt"
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
token_type_ids = inputs["token_type_ids"]
target = self.target[item]
return {
"ids": torch.tensor(ids, dtype=torch.long),
"mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"targets": torch.tensor(target, dtype=torch.long), # floatからlongに変更
}
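# Illustrative usage sketch (not part of the original file); the column names below
# are hypothetical and used only for illustration:
#
#     dataset = BERTSimpleDataset(df["text"], df["label"].values)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
#     batch = next(iter(loader))  # dict with "ids", "mask", "token_type_ids", "targets"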
class DistilBERTDataset:
"""
Dataset for bert which can accept clearning function
"""
def __init__(self, input_texts, target, clearning_function=clean_sentence):
if isinstance(input_texts, pd.Series):
input_texts = list(input_texts)
self.input_texts = input_texts
self.target = target
self.tokenizer = transformers.DistilBertTokenizer.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking"
)
self.max_len = 144 # twitter
self.clearning_function = clearning_function
def __len__(self):
return len(self.input_texts)
def __getitem__(self, item):
input_text = str(self.input_texts[item])
if self.clearning_function:
input_text = self.clearning_function(input_text)
inputs = self.tokenizer.encode_plus(
input_text,
None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
# return_tensors="pt"
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
# token_type_ids = inputs["token_type_ids"]
target = self.target[item]
return {
"ids": torch.tensor(ids, dtype=torch.long),
"mask": torch.tensor(mask, dtype=torch.long),
# "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"targets": torch.tensor(target, dtype=torch.long), # floatからlongに変更
}
| [
"torch.tensor"
] | 1.10.0 | jphacks/C_2111 | df87580614d7e5c225ea30746e5f2cd0576bbc98 |
1.4 | """
Implementation from: https://raw.githubusercontent.com/Zenglinxiao/OpenNMT-py/bert/onmt/encoders/bert.py
@Author: Zenglinxiao
"""
import torch.nn as nn
from onmt.encoders.transformer import TransformerEncoderLayer
from onmt.utils.misc import sequence_mask
class BertEncoder(nn.Module):
"""BERT Encoder: A Transformer Encoder with LayerNorm and BertPooler.
:cite:`DBLP:journals/corr/abs-1810-04805`
Args:
embeddings (onmt.modules.BertEmbeddings): embeddings to use
num_layers (int): number of encoder layers.
d_model (int): size of the model
heads (int): number of heads
d_ff (int): size of the inner FF layer
dropout (float): dropout parameters
"""
def __init__(self, embeddings, num_layers=12, d_model=768, heads=12,
d_ff=3072, dropout=0.1, attention_dropout=0.1,
max_relative_positions=0):
super(BertEncoder, self).__init__()
self.num_layers = num_layers
self.d_model = d_model
self.heads = heads
self.dropout = dropout
# Feed-Forward size should be 4*d_model as in paper
self.d_ff = d_ff
self.embeddings = embeddings
# Transformer Encoder Block
self.encoder = nn.ModuleList(
[TransformerEncoderLayer(d_model, heads, d_ff,
dropout, attention_dropout,
max_relative_positions=max_relative_positions,
activation='gelu') for _ in range(num_layers)])
self.layer_norm = nn.LayerNorm(d_model, eps=1e-12)
self.pooler = BertPooler(d_model)
@classmethod
def from_opt(cls, opt, embeddings):
"""Alternate constructor."""
return cls(
embeddings,
opt.enc_layers,
opt.word_vec_size,
opt.heads,
opt.transformer_ff,
opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
opt.attention_dropout[0] if type(opt.attention_dropout)
is list else opt.attention_dropout,
opt.max_relative_positions
)
def forward(self, input_ids, lengths, token_type_ids=None):
"""
Args:
input_ids (Tensor): ``(seq_len, batch_size, feature_dim)``, padding ids=0
lengths (Tensor): ``(batch_size)``, record length of sequence
token_type_ids (seq_len, batch_size): ``(B, S)``, A(0), B(1), pad(0)
Returns:
all_encoder_layers (list of Tensor): ``(B, S, H)``, token level
pooled_output (Tensor): ``(B, H)``, sequence level
"""
# remove the feature dimension
# seq_len x batch_size
emb = self.embeddings(input_ids, token_type_ids)
out = emb.transpose(0, 1).contiguous()
# [batch, seq] -> [batch, 1, seq]
mask = ~sequence_mask(lengths).unsqueeze(1)
for layer in self.encoder:
out = layer(out, mask)
out = self.layer_norm(out)
return emb, out.transpose(0, 1).contiguous(), lengths
def update_dropout(self, dropout):
self.dropout = dropout
self.embeddings.update_dropout(dropout)
for layer in self.encoder:
layer.update_dropout(dropout)
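# --- Added construction sketch (illustrative; not part of the original file) ---
# BertEncoder expects an onmt BertEmbeddings module; the vocab size below is an
# arbitrary placeholder, and BertEmbeddings(vocab_size) mirrors how the companion
# weight-conversion script builds it.
def _example_build_bert_encoder(vocab_size=28996):
    from onmt.modules.bert_embeddings import BertEmbeddings
    embeddings = BertEmbeddings(vocab_size)
    return BertEncoder(embeddings, num_layers=12, d_model=768, heads=12)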
class BertPooler(nn.Module):
def __init__(self, hidden_size):
"""A pooling block (Linear layer followed by Tanh activation).
Args:
hidden_size (int): size of hidden layer.
"""
super(BertPooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation_fn = nn.Tanh()
def forward(self, hidden_states):
"""hidden_states[:, 0, :] --> {Linear, Tanh} --> Returns.
Args:
hidden_states (Tensor): last layer's hidden_states, ``(B, S, H)``
Returns:
            pooled_output (Tensor): transformed output of last layer's hidden state
"""
first_token_tensor = hidden_states[:, 0, :] # [batch, d_model]
pooled_output = self.activation_fn(self.dense(first_token_tensor))
        return pooled_output
| [
"torch.nn.Linear",
"torch.nn.Tanh",
"torch.nn.LayerNorm"
] | 1.4.0 | SivilTaram/dialogue-utterance-rewriter-pytorch | 92c2254958b7a1ee9199836f7f2236575270983f |
1.4 | #!/usr/bin/env python
""" Convert weights of huggingface Bert to onmt Bert"""
from argparse import ArgumentParser
import torch
from onmt.encoders.bert import BertEncoder
from onmt.models.bert_generators import BertPreTrainingHeads
from onmt.modules.bert_embeddings import BertEmbeddings
from collections import OrderedDict
import re
def decrement(matched):
value = int(matched.group(1))
if value < 1:
raise ValueError('Value Error when converting string')
string = "bert.encoder.layer.{}.output.LayerNorm".format(value-1)
return string
def mapping_key(key, max_layers):
if 'bert.embeddings' in key:
key = key
elif 'bert.encoder' in key:
# convert layer_norm weights
key = re.sub(r'bert.encoder.0.layer_norm\.(.*)',
r'bert.embeddings.LayerNorm.\1', key)
key = re.sub(r'bert.encoder\.(\d+)\.layer_norm',
decrement, key)
# convert attention weights
key = re.sub(r'bert.encoder\.(\d+)\.self_attn.linear_keys\.(.*)',
r'bert.encoder.layer.\1.attention.self.key.\2', key)
key = re.sub(r'bert.encoder\.(\d+)\.self_attn.linear_values\.(.*)',
r'bert.encoder.layer.\1.attention.self.value.\2', key)
key = re.sub(r'bert.encoder\.(\d+)\.self_attn.linear_query\.(.*)',
r'bert.encoder.layer.\1.attention.self.query.\2', key)
key = re.sub(r'bert.encoder\.(\d+)\.self_attn.final_linear\.(.*)',
r'bert.encoder.layer.\1.attention.output.dense.\2', key)
# convert feed forward weights
key = re.sub(r'bert.encoder\.(\d+)\.feed_forward.layer_norm\.(.*)',
r'bert.encoder.layer.\1.attention.output.LayerNorm.\2',
key)
key = re.sub(r'bert.encoder\.(\d+)\.feed_forward.w_1\.(.*)',
r'bert.encoder.layer.\1.intermediate.dense.\2', key)
key = re.sub(r'bert.encoder\.(\d+)\.feed_forward.w_2\.(.*)',
r'bert.encoder.layer.\1.output.dense.\2', key)
elif 'bert.layer_norm' in key:
key = re.sub(r'bert.layer_norm',
r'bert.encoder.layer.' + str(max_layers - 1) +
'.output.LayerNorm', key)
elif 'bert.pooler' in key:
key = key
elif 'generator.next_sentence' in key:
key = re.sub(r'generator.next_sentence.linear\.(.*)',
r'cls.seq_relationship.\1', key)
elif 'generator.mask_lm' in key:
key = re.sub(r'generator.mask_lm.bias',
r'cls.predictions.bias', key)
key = re.sub(r'generator.mask_lm.decode.weight',
r'cls.predictions.decoder.weight', key)
key = re.sub(r'generator.mask_lm.transform.dense\.(.*)',
r'cls.predictions.transform.dense.\1', key)
key = re.sub(r'generator.mask_lm.transform.layer_norm\.(.*)',
r'cls.predictions.transform.LayerNorm.\1', key)
else:
raise KeyError("Unexpected keys! Please provide HuggingFace weights")
return key
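# Added illustrative check (not in the original script): with a 12-layer model,
# mapping_key turns an ONMT-style attention key into its HuggingFace counterpart.
def _example_mapping_key():
    # -> 'bert.encoder.layer.0.attention.self.key.weight'
    return mapping_key('bert.encoder.0.self_attn.linear_keys.weight', 12)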
def convert_bert_weights(bert_model, weights, n_layers=12):
bert_model_keys = bert_model.state_dict().keys()
bert_weights = OrderedDict()
generator_weights = OrderedDict()
model_weights = {"bert": bert_weights,
"generator": generator_weights}
hugface_keys = weights.keys()
try:
for key in bert_model_keys:
hugface_key = mapping_key(key, n_layers)
if hugface_key not in hugface_keys:
if 'LayerNorm' in hugface_key:
# Fix LayerNorm of old huggingface ckp
hugface_key = re.sub(r'LayerNorm.weight',
r'LayerNorm.gamma', hugface_key)
hugface_key = re.sub(r'LayerNorm.bias',
r'LayerNorm.beta', hugface_key)
if hugface_key in hugface_keys:
print("[OLD Weights file]gamma/beta is used in " +
"naming BertLayerNorm. Mapping succeed.")
else:
raise KeyError("Failed fix LayerNorm %s, check file"
% hugface_key)
else:
raise KeyError("Mapped key %s not in weight file"
% hugface_key)
if 'generator' not in key:
onmt_key = re.sub(r'bert\.(.*)', r'\1', key)
model_weights['bert'][onmt_key] = weights[hugface_key]
else:
onmt_key = re.sub(r'generator\.(.*)', r'\1', key)
model_weights['generator'][onmt_key] = weights[hugface_key]
except KeyError:
print("Unsuccessful convert.")
raise
return model_weights
def main():
parser = ArgumentParser()
parser.add_argument("--layers", type=int, default=12, required=True)
parser.add_argument("--bert_model_weights_file", "-i", type=str, default="bert-base-chinese/pytorch_model.bin",
required=True, help="Path to the "
"huggingface Bert weights file download from "
"https://github.com/huggingface/pytorch-transformers")
parser.add_argument("--output_name", "-o", type=str, required=True, default="bert-base-chinese/onmt_bert.pt",
help="output onmt version Bert weight file Path")
args = parser.parse_args()
print("Model contain {} layers.".format(args.layers))
print("Load weights from {}.".format(args.bert_model_weights_file))
bert_weights = torch.load(args.bert_model_weights_file)
    embeddings = BertEmbeddings(28996)  # vocab size does not affect the conversion
bert_encoder = BertEncoder(embeddings)
generator = BertPreTrainingHeads(bert_encoder.d_model,
embeddings.vocab_size)
bertlm = torch.nn.Sequential(OrderedDict([
('bert', bert_encoder),
('generator', generator)]))
model_weights = convert_bert_weights(bertlm, bert_weights, args.layers)
ckp = {'model': model_weights['bert'],
'generator': model_weights['generator']}
outfile = args.output_name
print("Converted weights file in {}".format(outfile))
torch.save(ckp, outfile)
if __name__ == '__main__':
main()
| [
"torch.save",
"torch.load"
] | 1.4.0 | SivilTaram/dialogue-utterance-rewriter-pytorch | 92c2254958b7a1ee9199836f7f2236575270983f |
1.4 | #!/usr/bin/env python
"""Training on a single process."""
import os
import torch
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from onmt.model_builder import build_model
from onmt.utils.optimizers import Optimizer
from onmt.utils.misc import set_random_seed
from onmt.trainer import build_trainer
from onmt.models import build_model_saver
from onmt.utils.logging import init_logger, logger
from onmt.utils.parse import ArgumentParser
def _check_save_model_path(opt):
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def _tally_parameters(model):
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
else:
dec += param.nelement()
return enc + dec, enc, dec
def configure_process(opt, device_id):
if device_id >= 0:
torch.cuda.set_device(device_id)
set_random_seed(opt.seed, device_id >= 0)
def main(opt, device_id, batch_queue=None, semaphore=None):
# NOTE: It's important that ``opt`` has been validated and updated
# at this point.
configure_process(opt, device_id)
init_logger(opt.log_file)
assert len(opt.accum_count) == len(opt.accum_steps), \
'Number of accum_count values must match number of accum_steps'
# Load checkpoint if we resume from a previous training.
if opt.train_from:
logger.info('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
if 'opt' in checkpoint:
model_opt = ArgumentParser.ckpt_model_opts(checkpoint["opt"])
ArgumentParser.update_model_opts(model_opt)
ArgumentParser.validate_model_opts(model_opt)
else:
model_opt = opt
if 'vocab' in checkpoint:
logger.info('Loading vocab from checkpoint at %s.', opt.train_from)
vocab = checkpoint['vocab']
else:
vocab = torch.load(opt.data + '.vocab.pt')
else:
checkpoint = None
model_opt = opt
vocab = torch.load(opt.data + '.vocab.pt')
# check for code where vocab is saved instead of fields
# (in the future this will be done in a smarter way)
if old_style_vocab(vocab):
fields = load_old_vocab(
vocab, opt.model_type, dynamic_dict=opt.copy_attn)
else:
fields = vocab
# Report src and tgt vocab sizes, including for features
for side in ['src', 'tgt']:
f = fields[side]
try:
f_iter = iter(f)
except TypeError:
f_iter = [(side, f)]
for sn, sf in f_iter:
if sf.use_vocab:
logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))
# Build model.
model = build_model(model_opt, opt, fields, checkpoint)
n_params, enc, dec = _tally_parameters(model)
logger.info('encoder: %d' % enc)
logger.info('decoder: %d' % dec)
logger.info('* number of parameters: %d' % n_params)
_check_save_model_path(opt)
# Build optimizer.
optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)
# Build model saver
model_saver = build_model_saver(model_opt, opt, model, fields, optim)
trainer = build_trainer(
opt, device_id, model, fields, optim, model_saver=model_saver)
if batch_queue is None:
if len(opt.data_ids) > 1:
train_shards = []
for train_id in opt.data_ids:
shard_base = "train_" + train_id
train_shards.append(shard_base)
train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
else:
if opt.data_ids[0] is not None:
shard_base = "train_" + opt.data_ids[0]
else:
shard_base = "train"
train_iter = build_dataset_iter(shard_base, fields, opt)
else:
assert semaphore is not None, \
"Using batch_queue requires semaphore as well"
def _train_iter():
while True:
batch = batch_queue.get()
semaphore.release()
yield batch
train_iter = _train_iter()
valid_iter = build_dataset_iter(
"valid", fields, opt, is_train=False)
if len(opt.gpu_ranks):
logger.info('Starting training on GPU: %s' % opt.gpu_ranks)
else:
logger.info('Starting training on CPU, could be very slow')
train_steps = opt.train_steps
if opt.single_pass and train_steps > 0:
logger.warning("Option single_pass is enabled, ignoring train_steps.")
train_steps = 0
trainer.train(
train_iter,
train_steps,
save_checkpoint_steps=opt.save_checkpoint_steps,
valid_iter=valid_iter,
valid_steps=opt.valid_steps)
if trainer.report_manager.tensorboard_writer is not None:
trainer.report_manager.tensorboard_writer.close()
| [
"torch.cuda.set_device",
"torch.load"
] | 1.4.0 | SivilTaram/dialogue-utterance-rewriter-pytorch | 92c2254958b7a1ee9199836f7f2236575270983f |
1.0 | """
Here come the tests for attention types and their compatibility
"""
import unittest
import torch
from torch.autograd import Variable
import onmt
class TestAttention(unittest.TestCase):
def test_masked_global_attention(self):
source_lengths = torch.IntTensor([7, 3, 5, 2])
# illegal_weights_mask = torch.ByteTensor([
# [0, 0, 0, 0, 0, 0, 0],
# [0, 0, 0, 1, 1, 1, 1],
# [0, 0, 0, 0, 0, 1, 1],
# [0, 0, 1, 1, 1, 1, 1]])
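        # Added illustrative note (not in the original test): the commented-out
        # mask above marks the padded positions implied by source_lengths, i.e.
        # torch.arange(7).unsqueeze(0) >= source_lengths.unsqueeze(1)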
batch_size = source_lengths.size(0)
dim = 20
memory_bank = Variable(torch.randn(batch_size,
source_lengths.max(), dim))
hidden = Variable(torch.randn(batch_size, dim))
attn = onmt.modules.Attention(dim)
_, alignments = attn(hidden, memory_bank,
memory_lengths=source_lengths)
# TODO: fix for pytorch 0.3
# illegal_weights = alignments.masked_select(illegal_weights_mask)
# self.assertEqual(0.0, illegal_weights.data.sum())
| [
"torch.IntTensor",
"torch.randn"
] | 1.0 | deep-spin/SIGMORPHON2019 | 60cf3b53be42e76238e7928405b2916cd9aed6c4 |
1.0 | import torch
import torch.nn as nn
from torch.autograd import Function
from onmt.utils.misc import aeq as assert_equal
from onmt.modules.sparse_activations import sparsemax
def _fy_backward(ctx, grad_output):
p_star, = ctx.saved_tensors
grad = grad_output.unsqueeze(1) * p_star
return grad
def _omega_sparsemax(p_star):
return (1 - (p_star ** 2).sum(dim=1)) / 2
class SparsemaxLossFunction(Function):
@classmethod
def forward(cls, ctx, input, target):
"""
input (FloatTensor): n x num_classes
target (LongTensor): n, the indices of the target classes
"""
assert_equal(input.shape[0], target.shape[0])
p_star = sparsemax(input, 1)
cls.p_star = p_star.clone().detach()
loss = _omega_sparsemax(p_star)
p_star.scatter_add_(1, target.unsqueeze(1),
torch.full_like(p_star, -1))
loss += torch.einsum("ij,ij->i", p_star, input)
ctx.save_for_backward(p_star)
return loss
@staticmethod
def backward(ctx, grad_output):
return _fy_backward(ctx, grad_output), None
sparsemax_loss = SparsemaxLossFunction.apply
class SparsemaxLoss(nn.Module):
def __init__(self, weight=None, ignore_index=-100,
reduction='elementwise_mean'):
assert reduction in ['elementwise_mean', 'sum', 'none']
self.reduction = reduction
self.weight = weight
self.ignore_index = ignore_index
super(SparsemaxLoss, self).__init__()
def forward(self, input, target):
loss = sparsemax_loss(input, target)
if self.ignore_index >= 0:
ignored_positions = target == self.ignore_index
size = float((target.size(0) - ignored_positions.sum()).item())
loss.masked_fill_(ignored_positions, 0.0)
else:
size = float(target.size(0))
if self.reduction == 'sum':
loss = loss.sum()
elif self.reduction == 'elementwise_mean':
loss = loss.sum() / size
return loss, SparsemaxLossFunction.p_star
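# --- Added usage sketch (illustrative only; not part of the original module) ---
def _example_sparsemax_loss():
    criterion = SparsemaxLoss(reduction='elementwise_mean')
    logits = torch.randn(4, 5)             # n x num_classes
    target = torch.tensor([0, 2, 1, 4])    # n
    loss, p_star = criterion(logits, target)
    return loss, p_star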
| [
"torch.einsum",
"torch.full_like"
] | 1.0 | deep-spin/SIGMORPHON2019 | 60cf3b53be42e76238e7928405b2916cd9aed6c4 |
1.1 | import torch
import numpy as np
import smplx
from smplx import SMPL as _SMPL
from smplx.body_models import ModelOutput
from smplx.lbs import vertices2joints
import spin.config as config
import spin.constants as constants
class SMPL(_SMPL):
""" Extension of the official SMPL implementation to support more joints """
def __init__(self, *args, **kwargs):
super(SMPL, self).__init__(*args, **kwargs)
joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]
J_regressor_extra = np.load(config.JOINT_REGRESSOR_TRAIN_EXTRA)
self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))
self.joint_map = torch.tensor(joints, dtype=torch.long)
def forward(self, *args, **kwargs):
kwargs['get_skin'] = True
smpl_output = super(SMPL, self).forward(*args, **kwargs)
extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)
joints = torch.cat([smpl_output.joints, extra_joints], dim=1)
joints = smpl_output.joints
# print(smpl_output.joints.shape)
# joints = joints[:, self.joint_map, :]
output = ModelOutput(vertices=smpl_output.vertices,
global_orient=smpl_output.global_orient,
body_pose=smpl_output.body_pose,
joints=joints,
betas=smpl_output.betas,
full_pose=smpl_output.full_pose)
return output
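# --- Added usage sketch (illustrative; not part of the original file) ---
# config.SMPL_MODEL_DIR and the constructor keywords below are assumptions about
# the surrounding SPIN/smplx setup, shown only to indicate typical usage.
def _example_smpl_forward():
    smpl = SMPL(config.SMPL_MODEL_DIR, batch_size=1, create_transl=False)
    out = smpl(betas=torch.zeros(1, 10), body_pose=torch.zeros(1, 69),
               global_orient=torch.zeros(1, 3))
    return out.vertices, out.joints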
| [
"torch.cat",
"torch.tensor"
] | 1.1.0 | krumo/SPIN | 0e2f17e70f06de46e062683ea6d5b233eeaa73c1 |
0.4 | import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.nn as nn
import torch.utils.data
import numpy as np
from opt import opt
from dataloader import VideoLoader, DetectionLoader, DetectionProcessor, DataWriter, Mscoco
from yolo.util import write_results, dynamic_write_results
from SPPE.src.main_fast_inference import *
import ntpath
import os
import sys
from tqdm import tqdm
import time
from fn import getTime
import cv2
from pPose_nms import pose_nms, write_json
args = opt
args.dataset = 'coco'
if not args.sp:
torch.multiprocessing.set_start_method('forkserver', force=True)
torch.multiprocessing.set_sharing_strategy('file_system')
if __name__ == "__main__":
videofile = args.video
mode = args.mode
if not os.path.exists(args.outputpath):
os.mkdir(args.outputpath)
if not len(videofile):
raise IOError('Error: must contain --video')
# Load input video
data_loader = VideoLoader(videofile, batchSize=args.detbatch).start()
(fourcc,fps,frameSize) = data_loader.videoinfo()
# Load detection loader
print('Loading YOLO model..')
sys.stdout.flush()
det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
det_processor = DetectionProcessor(det_loader).start()
# Load pose model
pose_dataset = Mscoco()
if args.fast_inference:
print('Using fast inference...')
pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
else:
print('Using slow, more accurate inference...')
pose_model = InferenNet(4 * 1 + 1, pose_dataset)
pose_model
pose_model.eval()
runtime_profile = {
'dt': [],
'pt': [],
'pn': []
}
# Data writer
save_path = os.path.join(args.outputpath, 'AlphaPose_'+ntpath.basename(videofile).split('.')[0]+'.avi')
writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()
im_names_desc = tqdm(range(data_loader.length()))
batchSize = args.posebatch
for i in im_names_desc:
start_time = getTime()
with torch.no_grad():
(inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
if orig_img is None:
break
if boxes is None or boxes.nelement() == 0:
writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
continue
ckpt_time, det_time = getTime(start_time)
runtime_profile['dt'].append(det_time)
# Pose Estimation
datalen = inps.size(0)
leftover = 0
if (datalen) % batchSize:
leftover = 1
num_batches = datalen // batchSize + leftover
hm = []
for j in range(num_batches):
inps_j = inps[j*batchSize:min((j + 1)*batchSize, datalen)]
hm_j = pose_model(inps_j)
hm.append(hm_j)
hm = torch.cat(hm)
ckpt_time, pose_time = getTime(ckpt_time)
runtime_profile['pt'].append(pose_time)
hm = hm.cpu().data
writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
ckpt_time, post_time = getTime(ckpt_time)
runtime_profile['pn'].append(post_time)
if args.profile:
# TQDM
im_names_desc.set_description(
'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
dt=np.mean(runtime_profile['dt']), pt=np.mean(runtime_profile['pt']), pn=np.mean(runtime_profile['pn']))
)
print('===========================> Finish Model Running.')
if (args.save_img or args.save_video) and not args.vis_fast:
print('===========================> Rendering remaining images in the queue...')
print('===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).')
while(writer.running()):
pass
writer.stop()
final_result = writer.results()
write_json(final_result, args.outputpath)
| [
"torch.cat",
"torch.no_grad",
"torch.multiprocessing.set_start_method",
"torch.multiprocessing.set_sharing_strategy"
] | 0.4.0 | mdraw/AlphaPose | bed8e0798f6deed4789b9ae2646f72b9fd138c5b |
1.7 | import sys
import os
import torch
import pandas as pd
import datetime
from argparse import ArgumentParser
import numpy as np
from torch import nn, optim
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split
from icecream import ic
import pytorch_lightning as pl
from pytorch_lightning.metrics import functional as FM
from network.ecgresnet_auxout import ECGResNet_AuxOut
from utils.helpers import create_results_directory
from utils.focalloss_weights import FocalLoss
class ECGResNetEnsemble_AuxOutSystem(pl.LightningModule):
"""
This class implements the ECGResNet with ensemble and auxiliary output in PyTorch Lightning.
It can estimate the epistemic and aleatoric uncertainty of its predictions.
"""
def __init__(self, in_channels, n_grps, N,
num_classes, dropout, first_width, stride,
dilation, learning_rate, ensemble_size, n_logit_samples, loss_weights=None,
**kwargs):
"""
Initializes the ECGResNetEnsemble_AuxOutSystem
Args:
in_channels: number of channels of input
n_grps: number of ResNet groups
            N: number of blocks per group
num_classes: number of classes of the classification problem
dropout: probability of an argument to get zeroed in the dropout layer
first_width: width of the first input
stride: tuple with stride value per block per group
dilation: spacing between the kernel points of the convolutional layers
learning_rate: the learning rate of the model
ensemble_size: the number of models that make up the ensemble
n_logit_samples: number of logit samples of the auxiliary output
loss_weights: array of weights for the loss term
"""
super().__init__()
self.save_hyperparameters()
self.learning_rate = learning_rate
self.num_classes = num_classes
self.ensemble_size = ensemble_size
self.n_logit_samples = n_logit_samples
self.IDs = torch.empty(0).type(torch.LongTensor)
self.predicted_labels = torch.empty(0).type(torch.LongTensor)
self.correct_predictions = torch.empty(0).type(torch.BoolTensor)
self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor)
self.aleatoric_uncertainty = torch.empty(0).type(torch.FloatTensor)
self.total_uncertainty = torch.empty(0).type(torch.FloatTensor)
self.models = []
self.optimizers = []
for i in range(self.ensemble_size):
self.models.append(ECGResNet_AuxOut(in_channels,
n_grps, N, num_classes,
dropout, first_width,
stride, dilation)
)
if loss_weights is not None:
weights = torch.tensor(loss_weights, dtype = torch.float)
else:
weights = loss_weights
self.loss = FocalLoss(gamma=1, weights = weights)
def forward(self, x, model_idx):
"""Performs a forward through a single ensemble member.
Args:
x (tensor): Input data.
model_idx (int): Index of the ensemble member.
Returns:
output1: Output at the auxiliary point of the ensemble member
output2: Output at the end of the ensemble member
output2_log_var: The log variance of the ensemble_member
"""
output1, output2_mean, output2_log_var = self.models[model_idx](x)
return output1, output2_mean, output2_log_var
def training_step(self, batch, batch_idx, optimizer_idx):
"""Performs a training step for all ensemble members.
Args:
batch (dict): Output of the dataloader.
batch_idx (int): Index no. of this batch.
Returns:
tensor: Total loss for this step.
"""
data, target = batch['waveform'], batch['label']
losses = []
for model_idx in range(self.ensemble_size):
# Make prediction
output1, output2_mean, output2_log_var = self(data, model_idx)
# Sample from logits, returning a vector x_i
x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)
train_loss1 = self.loss(output1, target)
train_loss2 = self.loss(x_i, target)
total_train_loss = (0.3 * train_loss1) + train_loss2
# Update weights for each model using individual optimizers
self.manual_backward(total_train_loss, self.optimizers[model_idx])
self.optimizers[model_idx].step()
self.optimizers[model_idx].zero_grad()
losses.append(total_train_loss.item())
self.log('model_{}_train_loss'.format(model_idx), total_train_loss)
average_train_loss = np.mean(losses)
self.log('average_train_loss', average_train_loss)
return {'loss': average_train_loss}
def validation_step(self, batch, batch_idx):
prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
data, target = batch['waveform'], batch['label']
# Predict for each model
for model_idx in range(self.ensemble_size):
# Make prediction
_, output2_mean, output2_log_var = self(data, model_idx)
            # Sample from logits, returning a vector x_i
x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)
prediction_individual[:, model_idx] = x_i
# Calculate mean over predictions from individual ensemble members
prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)
val_loss = self.loss(prediction_ensemble_mean, target)
acc = FM.accuracy(prediction_ensemble_mean, target)
# loss is tensor. The Checkpoint Callback is monitoring 'checkpoint_on'
metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}
self.log('val_acc', acc.item())
self.log('val_loss', val_loss.item())
return metrics
def test_step(self, batch, batch_idx, save_to_csv=False):
prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
aleatoric_var = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
data, target = batch['waveform'], batch['label']
# Predict for each model
for model_idx, model in enumerate(self.models):
# Make prediction
_, output2_mean, output2_log_var = self(data, model_idx)
# Sample from logits, returning a vector x_i
x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)
prediction_individual[:, model_idx] = x_i.data
# Take exponent to get the variance
output2_var = output2_log_var.exp()
aleatoric_var[:, model_idx] = output2_var.data
# Calculate mean and variance over predictions from individual ensemble members
prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)
prediction_ensemble_var = torch.var(prediction_individual, dim=1)
# Get the average aleatoric uncertainty for each prediction
prediction_aleatoric_var = torch.mean(aleatoric_var, dim=1)
# Select the predicted labels
predicted_labels = prediction_ensemble_mean.argmax(dim=1)
test_loss = self.loss(prediction_ensemble_mean, target)
acc = FM.accuracy(prediction_ensemble_mean, target)
# Get the epistemic variance of the predicted labels by selecting the variance of
# the labels with highest average Softmax value
predicted_labels_var = torch.gather(prediction_ensemble_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()
# Get the aleatoric variance of the predicted labels by selecting the variance of
# the labels with highest average Softmax value
predicted_labels_aleatoric_var = torch.gather(prediction_aleatoric_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()
total_var = predicted_labels_var + predicted_labels_aleatoric_var
# Log and save metrics
self.log('test_acc', acc.item())
self.log('test_loss', test_loss.item())
self.IDs = torch.cat((self.IDs, batch['id']), 0)
self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0)
self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0)
self.aleatoric_uncertainty = torch.cat((self.aleatoric_uncertainty, predicted_labels_aleatoric_var), 0)
self.total_uncertainty = torch.cat((self.total_uncertainty, total_var), 0)
self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0)
        return {'test_loss': test_loss.item(), 'test_acc': acc.item()}
def configure_optimizers(self):
"""
Initialize an optimizer for each model in the ensemble
"""
for i in range(self.ensemble_size):
self.optimizers.append(optim.Adam(self.models[i].parameters(), lr=self.learning_rate))
return self.optimizers
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--model_name', type=str, default='ensemble_none')
parser.add_argument('--ensemble_size', type=int, default=5)
parser.add_argument('--ensembling_method', type=bool, default=True)
parser.add_argument('--n_logit_samples', type=int, default=100)
return parser
def save_results(self):
"""
Combine results into single dataframe and save to disk as .csv file
"""
results = pd.concat([
pd.DataFrame(self.IDs.numpy(), columns= ['ID']),
pd.DataFrame(self.predicted_labels.numpy(), columns= ['predicted_label']),
pd.DataFrame(self.correct_predictions.numpy(), columns= ['correct_prediction']),
pd.DataFrame(self.epistemic_uncertainty.numpy(), columns= ['epistemic_uncertainty']),
pd.DataFrame(self.aleatoric_uncertainty.numpy(), columns= ['aleatoric_uncertainty']),
pd.DataFrame(self.total_uncertainty.numpy(), columns= ['total_uncertainty']),
], axis=1)
create_results_directory()
results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)
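# --- Added usage sketch (illustrative; not part of the original module) ---
# Shows how the model-specific CLI flags defined above would be attached to a parser.
def _example_cli_parser():
    base = ArgumentParser(add_help=False)
    parser = ECGResNetEnsemble_AuxOutSystem.add_model_specific_args(base)
    return parser.parse_args(['--ensemble_size', '3'])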
| [
"torch.cat",
"torch.var",
"torch.tensor",
"torch.empty",
"torch.mean"
] | 1.7.1 | HabibMrad/uncertainty | 1646a9b07d1179045dd0375149250d5ac7501004 |
1.1 | import os
import torch
from tensorboardX import SummaryWriter
import time
import glob
import re
import datetime
import argparse
from pathlib import Path
import torch.distributed as dist
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from eval_utils import eval_utils
def parse_config():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
parser.add_argument('--batch_size', type=int, default=16, required=False, help='batch size for training')
parser.add_argument('--epochs', type=int, default=80, required=False, help='Number of epochs to train for')
parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
parser.add_argument('--mgpus', action='store_true', default=False, help='whether to use multiple gpu')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
parser.add_argument('--start_epoch', type=int, default=0, help='')
parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
parser.add_argument('--save_to_file', action='store_true', default=False, help='')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
cfg.TAG = Path(args.cfg_file).stem
cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
# load checkpoint
model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
model.cuda()
# start evaluation
eval_utils.eval_one_epoch(
cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
result_dir=eval_output_dir, save_to_file=args.save_to_file
)
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
ckpt_list.sort(key=os.path.getmtime)
evaluated_ckpt_list = [float(x.strip()) for x in open(ckpt_record_file, 'r').readlines()]
for cur_ckpt in ckpt_list:
num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
if num_list.__len__() == 0:
continue
epoch_id = num_list[-1]
if 'optim' in epoch_id:
continue
if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
return epoch_id, cur_ckpt
return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
# evaluated ckpt record
ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
with open(ckpt_record_file, 'a'):
pass
# tensorboard log
if cfg.LOCAL_RANK == 0:
tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
total_time = 0
first_eval = True
while True:
# check whether there is checkpoint which is not evaluated
cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
wait_second = 30
if cfg.LOCAL_RANK == 0:
print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
% (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
time.sleep(wait_second)
total_time += 30
if total_time > args.max_waiting_mins * 60 and (first_eval is False):
break
continue
total_time = 0
first_eval = False
model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
model.cuda()
# start evaluation
cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
tb_dict = eval_utils.eval_one_epoch(
cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
result_dir=cur_result_dir, save_to_file=args.save_to_file
)
if cfg.LOCAL_RANK == 0:
for key, val in tb_dict.items():
tb_log.add_scalar(key, val, cur_epoch_id)
# record this epoch which has been evaluated
with open(ckpt_record_file, 'a') as f:
print('%s' % cur_epoch_id, file=f)
logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
args, cfg = parse_config()
if args.launcher == 'none':
dist_test = False
else:
args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
args.batch_size, args.tcp_port, args.local_rank, backend='nccl'
)
dist_test = True
output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
output_dir.mkdir(parents=True, exist_ok=True)
eval_output_dir = output_dir / 'eval'
if not args.eval_all:
num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
else:
eval_output_dir = eval_output_dir / 'eval_all_default'
if args.eval_tag is not None:
eval_output_dir = eval_output_dir / args.eval_tag
eval_output_dir.mkdir(parents=True, exist_ok=True)
log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
# log to file
logger.info('**********************Start logging**********************')
gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
if dist_test:
total_gpus = dist.get_world_size()
logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
for key, val in vars(args).items():
logger.info('{:16} {}'.format(key, val))
log_config_to_file(cfg, logger=logger)
ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
test_set, test_loader, sampler = build_dataloader(
dataset_cfg=cfg.DATA_CONFIG,
class_names=cfg.CLASS_NAMES,
batch_size=args.batch_size,
dist=dist_test, workers=args.workers, logger=logger, training=False
)
model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
with torch.no_grad():
if args.eval_all:
repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
else:
eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
if __name__ == '__main__':
main()
| [
"torch.distributed.get_world_size",
"torch.no_grad"
] | 1.1 | TillBeemelmanns/OpenPCDet | b7553c879d0ba36477931efe07a55adbc39823b9 |
1.1 | import numpy as np
import torch
import random
import logging
import os
import torch.multiprocessing as mp
import torch.distributed as dist
import subprocess
import pickle
import shutil
def check_numpy_to_torch(x):
if isinstance(x, np.ndarray):
return torch.from_numpy(x).float(), True
return x, False
def limit_period(val, offset=0.5, period=np.pi):
val, is_numpy = check_numpy_to_torch(val)
ans = val - torch.floor(val / period + offset) * period
return ans.numpy() if is_numpy else ans
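# Added illustrative example (not in the original utils): with the defaults this
# wraps an angle into [-pi/2, pi/2), e.g. 3.5 -> 3.5 - pi.
def _example_limit_period():
    return limit_period(np.array([3.5]))  # ~= 0.358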
def drop_info_with_name(info, name):
ret_info = {}
keep_indices = [i for i, x in enumerate(info['name']) if x != name]
for key in info.keys():
ret_info[key] = info[key][keep_indices]
return ret_info
def rotate_points_along_z(points, angle):
"""
Args:
points: (B, N, 3 + C)
angle: (B), angle along z-axis, angle increases x ==> y
Returns:
"""
points, is_numpy = check_numpy_to_torch(points)
angle, _ = check_numpy_to_torch(angle)
cosa = torch.cos(angle)
sina = torch.sin(angle)
zeros = angle.new_zeros(points.shape[0])
ones = angle.new_ones(points.shape[0])
rot_matrix = torch.stack((
cosa, sina, zeros,
-sina, cosa, zeros,
zeros, zeros, ones
), dim=1).view(-1, 3, 3).float()
points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
return points_rot.numpy() if is_numpy else points_rot
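# Added illustrative example (not in the original utils): a point on the x-axis
# rotated by pi/2 moves onto the y-axis, matching the "x ==> y" convention above.
def _example_rotate_points():
    pts = torch.tensor([[[1.0, 0.0, 0.0, 0.5]]])  # (B=1, N=1, 3 + C)
    ang = torch.tensor([np.pi / 2.0])             # (B,)
    return rotate_points_along_z(pts, ang)        # xyz ~ (0, 1, 0), extra channel kept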
def mask_points_by_range(points, limit_range):
mask = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3]) \
& (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4])
return mask
def get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range):
"""
Args:
voxel_coords: (N, 3)
downsample_times:
voxel_size:
point_cloud_range:
Returns:
"""
assert voxel_coords.shape[1] == 3
voxel_centers = voxel_coords[:, [2, 1, 0]].float() # (xyz)
voxel_size = torch.tensor(voxel_size, device=voxel_centers.device).float() * downsample_times
pc_range = torch.tensor(point_cloud_range[0:3], device=voxel_centers.device).float()
voxel_centers = (voxel_centers + 0.5) * voxel_size + pc_range
return voxel_centers
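# Added illustrative example (not in the original utils): voxel coords are (z, y, x)
# indices; the voxel size and range below are placeholder values in metres.
def _example_voxel_centers():
    coords = torch.tensor([[0, 0, 0], [1, 2, 3]])  # (N, 3) as (z, y, x)
    return get_voxel_centers(coords, downsample_times=1,
                             voxel_size=[0.1, 0.1, 0.2],
                             point_cloud_range=[0.0, -40.0, -3.0, 70.4, 40.0, 1.0])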
def create_logger(log_file=None, rank=0, log_level=logging.INFO):
logger = logging.getLogger(__name__)
logger.setLevel(log_level if rank == 0 else 'ERROR')
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(message)s')
console = logging.StreamHandler()
console.setLevel(log_level if rank == 0 else 'ERROR')
console.setFormatter(formatter)
logger.addHandler(console)
if log_file is not None:
file_handler = logging.FileHandler(filename=log_file)
file_handler.setLevel(log_level if rank == 0 else 'ERROR')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def keep_arrays_by_name(gt_names, used_classes):
inds = [i for i, x in enumerate(gt_names) if x in used_classes]
inds = np.array(inds, dtype=np.int64)
return inds
def init_dist_slurm(batch_size, tcp_port, local_rank, backend='nccl'):
"""
modified from https://github.com/open-mmlab/mmdetection
Args:
batch_size:
tcp_port:
backend:
Returns:
"""
proc_id = int(os.environ['SLURM_PROCID'])
ntasks = int(os.environ['SLURM_NTASKS'])
node_list = os.environ['SLURM_NODELIST']
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(proc_id % num_gpus)
addr = subprocess.getoutput('scontrol show hostname {} | head -n1'.format(node_list))
os.environ['MASTER_PORT'] = str(tcp_port)
os.environ['MASTER_ADDR'] = addr
os.environ['WORLD_SIZE'] = str(ntasks)
os.environ['RANK'] = str(proc_id)
dist.init_process_group(backend=backend)
total_gpus = dist.get_world_size()
assert batch_size % total_gpus == 0, 'Batch size should be matched with GPUS: (%d, %d)' % (batch_size, total_gpus)
batch_size_each_gpu = batch_size // total_gpus
rank = dist.get_rank()
return batch_size_each_gpu, rank
def init_dist_pytorch(batch_size, tcp_port, local_rank, backend='nccl'):
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method('spawn')
num_gpus = torch.cuda.device_count()
torch.cuda.set_device(local_rank % num_gpus)
dist.init_process_group(
backend=backend,
init_method='tcp://127.0.0.1:%d' % tcp_port,
rank=local_rank,
world_size=num_gpus
)
assert batch_size % num_gpus == 0, 'Batch size should be matched with GPUS: (%d, %d)' % (batch_size, num_gpus)
batch_size_each_gpu = batch_size // num_gpus
rank = dist.get_rank()
return batch_size_each_gpu, rank
def get_dist_info():
if torch.__version__ < '1.0':
initialized = dist._initialized
else:
if dist.is_available():
initialized = dist.is_initialized()
else:
initialized = False
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
def merge_results_dist(result_part, size, tmpdir):
rank, world_size = get_dist_info()
os.makedirs(tmpdir, exist_ok=True)
dist.barrier()
pickle.dump(result_part, open(os.path.join(tmpdir, 'result_part_{}.pkl'.format(rank)), 'wb'))
dist.barrier()
if rank != 0:
return None
part_list = []
for i in range(world_size):
part_file = os.path.join(tmpdir, 'result_part_{}.pkl'.format(i))
part_list.append(pickle.load(open(part_file, 'rb')))
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
ordered_results = ordered_results[:size]
shutil.rmtree(tmpdir)
return ordered_results
| [
"torch.distributed.get_world_size",
"torch.cat",
"torch.stack",
"torch.distributed.init_process_group",
"torch.multiprocessing.set_start_method",
"torch.manual_seed",
"torch.distributed.is_initialized",
"torch.tensor",
"torch.distributed.get_rank",
"torch.cos",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.distributed.barrier",
"torch.matmul",
"torch.distributed.is_available",
"torch.sin",
"torch.multiprocessing.get_start_method",
"torch.from_numpy",
"torch.floor"
] | 1.1 | TillBeemelmanns/OpenPCDet | b7553c879d0ba36477931efe07a55adbc39823b9 |
0.4 | import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs('images', exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--sample_interval', type=int, default=300, help='interval between image samples')
opt = parser.parse_args()
print(opt)
# Calculate output of image discriminator (PatchGAN)
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)
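# (Added note) With the default --img_size of 32 this yields a (1, 2, 2) PatchGAN
# output, matching the four stride-2 blocks in the Discriminator below.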
cuda = True if torch.cuda.is_available() else False
def weights_init_normal(m):
classname = m.__class__.__name__
print("classname : {}".format(classname))
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class ResidualBlock_back(nn.Module):
def __init__(self, in_features=64, out_features=64):
        super(ResidualBlock_back, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_features, in_features, 3, 1, 1),
nn.BatchNorm2d(in_features),
nn.ReLU(inplace=True),
nn.Conv2d(in_features, in_features, 3, 1, 1),
nn.BatchNorm2d(in_features)
)
def forward(self, x):
return x + self.block(x)
class ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(ResidualBlock, self).__init__()
# calculate same padding:
# (w - k + 2*p)/s + 1 = o
# => p = (s(o-1) - w + k)/2
        # e.g. with o=128, w=64, k=3, s=2: (2*(128-1) - 64 + 3)/2
### ENCODER
self.encode_block = nn.Sequential(
nn.Conv2d(in_channels=1*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
nn.BatchNorm2d(2*in_features),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels=2*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=2),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True)
)
print("self.encode_block : {}".format(self.encode_block))
self.decode_block = nn.Sequential(
nn.ConvTranspose2d(in_channels=4*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2), padding=2),
nn.BatchNorm2d(2*in_features),
nn.LeakyReLU(inplace=True),
nn.ConvTranspose2d(in_channels=2*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
nn.BatchNorm2d(1*in_features),
nn.LeakyReLU(inplace=True)
)
print("self.decode_block : {}".format(self.decode_block))
def forward(self, x):
encode_x = self.encode_block(x)
decode_x = self.decode_block(encode_x)
# decode_x = decode_x[:, :, :-1, :-1]
# decode_x = F.sigmoid(decode_x)
return x + decode_x
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
# Fully-connected layer which constructs image channel shaped output from noise
self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))
resblocks = []
for _ in range(opt.n_residual_blocks):
# resblocks.append(ResidualBlock())
resblocks.append(ResidualBlock())
self.resblocks = nn.Sequential(*resblocks)
self.l2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())
def forward(self, img, z):
gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)
out = self.l1(gen_input)
out = self.resblocks(out)
img_ = self.l2(out)
return img_
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
"""Discriminator block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512),
nn.Conv2d(512, 1, 3, 1, 1)
)
def forward(self, img):
validity = self.model(img)
return validity
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
def block(in_features, out_features, normalization=True):
"""Classifier block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512)
)
input_size = opt.img_size // 2**4
self.output_layer = nn.Sequential(
nn.Linear(512*input_size**2, opt.n_classes),
nn.Softmax()
)
def forward(self, img):
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
# Loss function
adversarial_loss = torch.nn.MSELoss()
task_loss = torch.nn.CrossEntropyLoss()
# Loss weights
lambda_adv = 1
lambda_task = 0.1
# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
classifier = Classifier()
if cuda:
generator.cuda()
discriminator.cuda()
classifier.cuda()
adversarial_loss.cuda()
task_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)
# Configure data loader
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
datasets.MNIST('../../data/mnist', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
MNISTM('../../data/mnistm', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
# Optimizers
optimizer_G = torch.optim.Adam( itertools.chain(generator.parameters(), classifier.parameters()),
lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
# Training
# ----------
# Keeps 100 accuracy measurements
task_performance = []
target_performance = []
for epoch in range(opt.n_epochs):
for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):
batch_size = imgs_A.size(0)
# Adversarial ground truths
valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
# Configure input
imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
labels_A = Variable(labels_A.type(LongTensor))
imgs_B = Variable(imgs_B.type(FloatTensor))
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Sample noise
z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))
# Generate a batch of images
fake_B = generator(imgs_A, z)
# Perform task on translated source image
label_pred = classifier(fake_B)
# Calculate the task loss
task_loss_ = (task_loss(label_pred, labels_A) + \
task_loss(classifier(imgs_A), labels_A)) / 2
# Loss measures generator's ability to fool the discriminator
g_loss = lambda_adv * adversarial_loss(discriminator(fake_B), valid) + \
lambda_task * task_loss_
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Measure discriminator's ability to classify real from generated samples
real_loss = adversarial_loss(discriminator(imgs_B), valid)
fake_loss = adversarial_loss(discriminator(fake_B.detach()), fake)
d_loss = (real_loss + fake_loss) / 2
d_loss.backward()
optimizer_D.step()
# ---------------------------------------
# Evaluate Performance on target domain
# ---------------------------------------
# Evaluate performance on translated Domain A
acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
task_performance.append(acc)
if len(task_performance) > 100:
task_performance.pop(0)
# Evaluate performance on Domain B
pred_B = classifier(imgs_B)
target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
target_performance.append(target_acc)
if len(target_performance) > 100:
target_performance.pop(0)
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
(epoch, opt.n_epochs,
i, len(dataloader_A),
d_loss.item(), g_loss.item(),
100*acc, 100*np.mean(task_performance),
100*target_acc, 100*np.mean(target_performance)))
batches_done = len(dataloader_A) * epoch + i
if batches_done % opt.sample_interval == 0:
sample = torch.cat((imgs_A.data[:5], fake_B.data[:5], imgs_B.data[:5]), -2)
save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.MSELoss",
"torch.nn.Softmax",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.init.normal_",
"torch.cuda.is_available",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.CrossEntropyLoss"
] | 0.4.0 | Napkin-DL/PyTorch-GAN | 4668fb434a74a4e4771631944e4abfb0ec1c8795 |
0.4 | import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs('images', exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--sample_interval', type=int, default=300, help='interval between image samples')
opt = parser.parse_args()
print(opt)
# Calculate output of image discriminator (PatchGAN)
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)
cuda = True if torch.cuda.is_available() else False
print("cuda : {}".format(cuda))
def weights_init_normal(m):
classname = m.__class__.__name__
print("classname : {}".format(classname))
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class ResidualBlock_back(nn.Module):
def __init__(self, in_features=64, out_features=64):
        super(ResidualBlock_back, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_features, in_features, 3, 1, 1),
nn.BatchNorm2d(in_features),
nn.ReLU(inplace=True),
nn.Conv2d(in_features, in_features, 3, 1, 1),
nn.BatchNorm2d(in_features)
)
def forward(self, x):
return x + self.block(x)
class sencode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(sencode_ResidualBlock, self).__init__()
### ENCODER
self.sencode_block = nn.Sequential(
nn.Conv2d(in_channels=1*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels=4*in_features,out_channels=8*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(8*in_features),
nn.LeakyReLU(inplace=True)
)
def forward(self, x):
encode_x = self.sencode_block(x)
return x, encode_x
class sdecode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(sdecode_ResidualBlock, self).__init__()
self.sdecode_block = nn.Sequential(
nn.ConvTranspose2d(in_channels=8*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2), padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.ConvTranspose2d(in_channels=4*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(1*in_features),
nn.LeakyReLU(inplace=True),
)
def forward(self, encode_x):
decode_x = self.sdecode_block(encode_x)
decode_x = decode_x[:, :, :-1, :-1]
decode_x = F.sigmoid(decode_x)
return decode_x
class tencode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(tencode_ResidualBlock, self).__init__()
### ENCODER
self.tencode_block = nn.Sequential(
nn.Conv2d(in_channels=1*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels=4*in_features,out_channels=8*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(8*in_features),
nn.LeakyReLU(inplace=True)
)
def forward(self, x):
encode_x = self.tencode_block(x)
return x, encode_x
class tdecode_ResidualBlock(nn.Module):
def __init__(self, in_features=64, out_features=64):
super(tdecode_ResidualBlock, self).__init__()
self.tdecode_block = nn.Sequential(
nn.ConvTranspose2d(in_channels=8*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2), padding=0),
nn.BatchNorm2d(4*in_features),
nn.LeakyReLU(inplace=True),
nn.ConvTranspose2d(in_channels=4*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=1),
nn.BatchNorm2d(1*in_features),
nn.LeakyReLU(inplace=True),
)
def forward(self, encode_x):
decode_x = self.tdecode_block(encode_x)
decode_x = decode_x[:, :, :-1, :-1]
decode_x = F.sigmoid(decode_x)
return decode_x
class target_encode_Generator(nn.Module):
def __init__(self):
super(target_encode_Generator, self).__init__()
# Fully-connected layer which constructs image channel shaped output from noise
self.tfc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.tl1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(tencode_ResidualBlock())
self.tencode_resblocks = nn.Sequential(*resblocks)
def forward(self, img, z):
gen_input = torch.cat((img, self.tfc(z).view(*img.shape)), 1)
out = self.tl1(gen_input)
x, encode_out = self.tencode_resblocks(out)
return x, encode_out
class source_encode_Generator(nn.Module):
def __init__(self):
super(source_encode_Generator, self).__init__()
# Fully-connected layer which constructs image channel shaped output from noise
self.sfc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.sl1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(sencode_ResidualBlock())
self.sencode_resblocks = nn.Sequential(*resblocks)
def forward(self, img, z):
gen_input = torch.cat((img, self.sfc(z).view(*img.shape)), 1)
out = self.sl1(gen_input)
x, encode_out = self.sencode_resblocks(out)
return x, encode_out
class target_decode_Generator(nn.Module):
def __init__(self):
super(target_decode_Generator, self).__init__()
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(tdecode_ResidualBlock())
self.target_decode_resblocks = nn.Sequential(*resblocks)
self.tl2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())
def forward(self, img, encode_out):
out = img + self.target_decode_resblocks(encode_out)
img_ = self.tl2(out)
return img_
class source_decode_Generator(nn.Module):
def __init__(self):
super(source_decode_Generator, self).__init__()
resblocks = []
for _ in range(opt.n_residual_blocks):
resblocks.append(sdecode_ResidualBlock())
self.source_decode_resblocks = nn.Sequential(*resblocks)
self.sl2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())
def forward(self, img, encode_out):
out = img + self.source_decode_resblocks(encode_out)
img_ = self.sl2(out)
return img_
class encode_Discriminator(nn.Module):
def __init__(self):
super(encode_Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
"""Discriminator block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
            *block(512, 512, normalization=False),
*block(512, 1024),
nn.Conv2d(1024, 1, 3, 1, 1)
)
def forward(self, encode_x):
validity = self.model(encode_x)
return validity
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
"""Discriminator block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512),
nn.Conv2d(512, 1, 3, 1, 1)
)
def forward(self, img):
validity = self.model(img)
return validity
class encode_Classifier(nn.Module):
def __init__(self):
super(encode_Classifier, self).__init__()
def block(in_features, out_features, normalization=True):
"""Classifier block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(256, 512, normalization=False),
            *block(512, 1024),
*block(1024, 2048)
)
input_size = opt.img_size // 2**4
self.output_layer = nn.Sequential(
nn.Linear(2048*input_size**2, opt.n_classes),
nn.Softmax()
)
def forward(self, img):
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
def block(in_features, out_features, normalization=True):
"""Classifier block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512)
)
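        # Four stride-2 blocks downsample the input by 2**4, so the flattened feature size is 512 * (img_size // 16)**2.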
input_size = opt.img_size // 2**4
self.output_layer = nn.Sequential(
nn.Linear(512*input_size**2, opt.n_classes),
nn.Softmax()
)
def forward(self, img):
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
# Loss function
adversarial_loss = torch.nn.MSELoss()
encode_adversarial_loss = torch.nn.MSELoss()
task_loss = torch.nn.CrossEntropyLoss()
# Loss weights
lambda_adv = 1
lambda_task = 0.1
# Initialize generator and discriminator
target_encode_generator = target_encode_Generator()
target_decode_generator = target_decode_Generator()
source_encode_generator = source_encode_Generator()
source_decode_generator = source_decode_Generator()
encode_discriminator = encode_Discriminator()
discriminator = Discriminator()
classifier = Classifier()
if cuda:
target_encode_generator.cuda()
target_decode_generator.cuda()
source_encode_generator.cuda()
source_decode_generator.cuda()
encode_discriminator.cuda()
discriminator.cuda()
classifier.cuda()
adversarial_loss.cuda()
encode_adversarial_loss.cuda()
task_loss.cuda()
# Initialize weights
target_encode_generator.apply(weights_init_normal)
target_decode_generator.apply(weights_init_normal)
source_encode_generator.apply(weights_init_normal)
source_decode_generator.apply(weights_init_normal)
encode_discriminator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)
# Configure data loader
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
datasets.MNIST('../../data/mnist', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
MNISTM('../../data/mnistm', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
# Optimizers
optimizer_G = torch.optim.Adam( itertools.chain(target_encode_generator.parameters(),
source_encode_generator.parameters(), target_decode_generator.parameters(),
source_decode_generator.parameters(),
classifier.parameters()),
lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
# Training
# ----------
# Keeps 100 accuracy measurements
task_performance = []
target_performance = []
for epoch in range(opt.n_epochs):
for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):
batch_size = imgs_A.size(0)
# Adversarial ground truths
valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
# Configure input
imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
labels_A = Variable(labels_A.type(LongTensor))
imgs_B = Variable(imgs_B.type(FloatTensor))
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Sample noise
z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))
# Generate a batch of images
imgs_A_x, encode_fake_B = source_encode_generator(imgs_A, z)
decode_fake_B = source_decode_generator(imgs_A_x, encode_fake_B)
# Perform task on translated source image
label_pred = classifier(decode_fake_B)
# Calculate the task loss
task_loss_ = (task_loss(label_pred, labels_A) + \
task_loss(classifier(imgs_A), labels_A)) / 2
# Loss measures generator's ability to fool the discriminator
g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \
0.1 * encode_adversarial_loss(encode_discriminator(encode_fake_B), valid) + \
lambda_task * task_loss_
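        # g_loss combines the image-level adversarial loss, the encoder-level adversarial loss (weighted 0.1),
        # and the classification task loss (weighted by lambda_task).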
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
imgs_B_x, encode_real_B = target_encode_generator(imgs_B, z)
decode_real_B = target_decode_generator(imgs_B_x, encode_real_B)
# Measure discriminator's ability to classify real from generated samples
encode_real_loss = adversarial_loss(encode_discriminator(encode_real_B), valid)
encode_fake_loss = adversarial_loss(encode_discriminator(encode_fake_B.detach()), fake)
decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)
decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)
encode_d_loss = (encode_real_loss + encode_fake_loss) / 2
decode_d_loss = (decode_real_loss + decode_fake_loss) / 2
d_loss = encode_d_loss + decode_d_loss
d_loss.backward()
optimizer_D.step()
# ---------------------------------------
# Evaluate Performance on target domain
# ---------------------------------------
# Evaluate performance on translated Domain A
acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
task_performance.append(acc)
if len(task_performance) > 100:
task_performance.pop(0)
# Evaluate performance on Domain B
pred_B = classifier(imgs_B)
target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
target_performance.append(target_acc)
if len(target_performance) > 100:
target_performance.pop(0)
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
(epoch, opt.n_epochs,
i, len(dataloader_A),
d_loss.item(), g_loss.item(),
100*acc, 100*np.mean(task_performance),
100*target_acc, 100*np.mean(target_performance)))
batches_done = len(dataloader_A) * epoch + i
if batches_done % opt.sample_interval == 0:
            sample = torch.cat((imgs_A.data[:5], decode_fake_B.data[:5], imgs_B.data[:5]), -2)
save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.nn.Softmax",
"torch.nn.init.constant_",
"torch.nn.ConvTranspose2d",
"torch.nn.init.normal_",
"torch.nn.functional.sigmoid",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.MSELoss"
] | 0.4.0 | Napkin-DL/PyTorch-GAN | 4668fb434a74a4e4771631944e4abfb0ec1c8795 |
0.4 | import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs('images', exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--sample_interval', type=int, default=300, help='interval between image samples')
opt = parser.parse_args()
print(opt)
# Calculate output of image discriminator (PatchGAN)
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)
cuda = True if torch.cuda.is_available() else False
print("cuda : {}".format(cuda))
def weights_init_normal(m):
classname = m.__class__.__name__
print("classname : {}".format(classname))
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
class encode_ResidualBlock1(nn.Module):
def __init__(self, in_features=32, out_features=64, kernel_size=3, stride=2, padding=1):
super(encode_ResidualBlock1, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, x):
encode_x = self.block(x)
return x, encode_x
class encode_ResidualBlock2(nn.Module):
def __init__(self, in_features=64, out_features=128, kernel_size=3, stride=2, padding=1):
super(encode_ResidualBlock2, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, x):
encode_x = self.block(x)
return encode_x
class encode_ResidualBlock3(nn.Module):
def __init__(self, in_features=128, out_features=256, kernel_size=3, stride=2, padding=1):
super(encode_ResidualBlock3, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size= kernel_size, stride = stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, x):
encode_x = self.block(x)
return encode_x
class decode_ResidualBlock1(nn.Module):
def __init__(self, in_features=256, out_features=128, kernel_size=3, stride=2, padding=0):
super(decode_ResidualBlock1, self).__init__()
self.block = nn.Sequential(
nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, encode_x):
decode_x = self.block(encode_x)
decode_x = decode_x[:,:,:-1,:-1]
return decode_x
class decode_ResidualBlock2(nn.Module):
def __init__(self, in_features=256, out_features=128, kernel_size=3, stride=2, padding=0):
super(decode_ResidualBlock2, self).__init__()
self.block = nn.Sequential(
nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, encode_x):
decode_x = self.block(encode_x)
decode_x = decode_x[:,:,:-1,:-1]
return decode_x
class decode_ResidualBlock3(nn.Module):
def __init__(self, in_features=256, out_features=128, kernel_size=3, stride=2, padding=0):
super(decode_ResidualBlock3, self).__init__()
self.block = nn.Sequential(
nn.ConvTranspose2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_features),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, encode_x):
decode_x = self.block(encode_x)
decode_x = decode_x[:,:,:-1,:-1]
return decode_x
class source_encode_Generator(nn.Module):
def __init__(self):
super(source_encode_Generator, self).__init__()
# Fully-connected layer which constructs image channel shaped output from noise
self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))
self.encode_resblocks1 = encode_ResidualBlock1()
self.encode_resblocks2 = encode_ResidualBlock2()
self.encode_resblocks3 = encode_ResidualBlock3()
def forward(self, img, z):
gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)
encode_x = self.l1(gen_input)
x, encode_out1 = self.encode_resblocks1(encode_x)
encode_out2 = self.encode_resblocks2(encode_out1)
encode_out3 = self.encode_resblocks3(encode_out2)
return x, encode_out1, encode_out2, encode_out3
class target_encode_Generator(nn.Module):
def __init__(self):
super(target_encode_Generator, self).__init__()
# Fully-connected layer which constructs image channel shaped output from noise
self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 32, 3, 1, 1), nn.ReLU(inplace=True))
self.encode_resblocks1 = encode_ResidualBlock1()
self.encode_resblocks2 = encode_ResidualBlock2()
self.encode_resblocks3 = encode_ResidualBlock3()
def forward(self, img, z):
gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1)
encode_x = self.l1(gen_input)
x, encode_out1 = self.encode_resblocks1(encode_x)
encode_out2 = self.encode_resblocks2(encode_out1)
encode_out3 = self.encode_resblocks3(encode_out2)
return x, encode_out1, encode_out2, encode_out3
class decode_Generator(nn.Module):
def __init__(self):
super(decode_Generator, self).__init__()
# Fully-connected layer which constructs image channel shaped output from noise
self.decode_resblocks1 = decode_ResidualBlock1()
self.decode_resblocks2 = decode_ResidualBlock2()
self.decode_resblocks3 = decode_ResidualBlock3()
self.l2 = nn.Sequential(nn.Conv2d(32, opt.channels, 3, 1, 1), nn.Tanh())
def forward(self, x, encode_out1, encode_out2, encode_out3):
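        # The decoder mirrors the encoder: each decode block receives the previous decoder output concatenated
        # with the matching encoder feature map (skip connections).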
print(x.size(),encode_out1.size(), encode_out2.size(), encode_out3.size() )
decode_out1 = self.decode_resblocks1(encode_out3)
print(decode_out1.size())
decode_out2 = self.decode_resblocks2(torch.cat([decode_out1,encode_out2], dim=1))
print(decode_out2.size())
decode_out3 = self.decode_resblocks3(torch.cat([decode_out2,encode_out1], dim=1))
print(decode_out3.size())
decode_x = F.sigmoid(decode_out3)
decode_x = decode_x[:, :, :-1, :-1]
out = x + decode_x
img_ = self.l2(out)
return img_
class encode_Discriminator(nn.Module):
def __init__(self):
super(encode_Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
"""Discriminator block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(512, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512),
nn.Conv2d(512, 1, 3, 1, 1)
)
def forward(self, encode_x):
validity = self.model(encode_x)
return validity
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
def block(in_features, out_features, normalization=True):
"""Discriminator block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512),
nn.Conv2d(512, 1, 3, 1, 1)
)
def forward(self, img):
validity = self.model(img)
return validity
class Classifier(nn.Module):
def __init__(self):
super(Classifier, self).__init__()
def block(in_features, out_features, normalization=True):
"""Classifier block"""
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
nn.LeakyReLU(0.2, inplace=True) ]
if normalization:
layers.append(nn.InstanceNorm2d(out_features))
return layers
self.model = nn.Sequential(
*block(opt.channels, 64, normalization=False),
*block(64, 128),
*block(128, 256),
*block(256, 512)
)
input_size = opt.img_size // 2**4
self.output_layer = nn.Sequential(
nn.Linear(512*input_size**2, opt.n_classes),
nn.Softmax()
)
def forward(self, img):
feature_repr = self.model(img)
feature_repr = feature_repr.view(feature_repr.size(0), -1)
label = self.output_layer(feature_repr)
return label
# Loss function
adversarial_loss = torch.nn.MSELoss()
encode_adversarial_loss = torch.nn.MSELoss()
task_loss = torch.nn.CrossEntropyLoss()
# Loss weights
lambda_adv = 1
lambda_task = 0.1
# Initialize generator and discriminator
target_encode_generator = target_encode_Generator()
source_encode_generator = source_encode_Generator()
decode_generator = decode_Generator()
encode_discriminator = encode_Discriminator()
discriminator = Discriminator()
classifier = Classifier()
if cuda:
target_encode_generator.cuda()
source_encode_generator.cuda()
decode_generator.cuda()
encode_discriminator.cuda()
discriminator.cuda()
classifier.cuda()
adversarial_loss.cuda()
encode_adversarial_loss.cuda()
task_loss.cuda()
# Initialize weights
target_encode_generator.apply(weights_init_normal)
source_encode_generator.apply(weights_init_normal)
decode_generator.apply(weights_init_normal)
encode_discriminator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)
# Configure data loader
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
datasets.MNIST('../../data/mnist', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
MNISTM('../../data/mnistm', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(opt.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])),
batch_size=opt.batch_size, shuffle=True)
# Optimizers
optimizer_G = torch.optim.Adam( itertools.chain(target_encode_generator.parameters(),
source_encode_generator.parameters(),
decode_generator.parameters(),
classifier.parameters()),
lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(itertools.chain(encode_discriminator.parameters(), discriminator.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
# Training
# ----------
# Keeps 100 accuracy measurements
task_performance = []
target_performance = []
for epoch in range(opt.n_epochs):
for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):
batch_size = imgs_A.size(0)
# Adversarial ground truths
valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
encode_valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
encode_fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
# Configure input
imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
labels_A = Variable(labels_A.type(LongTensor))
imgs_B = Variable(imgs_B.type(FloatTensor))
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Sample noise
z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))
# Generate a batch of images
imgs_A_x, sencode_1, sencode_2, encode_fake_B = source_encode_generator(imgs_A, z)
decode_fake_B = decode_generator(imgs_A_x, sencode_1, sencode_2, encode_fake_B)
# Perform task on translated source image
label_pred = classifier(decode_fake_B)
# Calculate the task loss
task_loss_ = (task_loss(label_pred, labels_A) + \
task_loss(classifier(imgs_A), labels_A)) / 2
# Loss measures generator's ability to fool the discriminator
g_loss = lambda_adv * adversarial_loss(discriminator(decode_fake_B), valid) + \
0.1 * encode_adversarial_loss(encode_discriminator(encode_fake_B), encode_valid) + \
lambda_task * task_loss_
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
imgs_B_x, tencode_1, tencode_2, encode_real_B = target_encode_generator(imgs_B, z)
decode_real_B = decode_generator(imgs_B_x, tencode_1, tencode_2, encode_real_B)
# Measure discriminator's ability to classify real from generated samples
encode_real_loss = encode_adversarial_loss(encode_discriminator(encode_real_B), encode_valid)
encode_fake_loss = encode_adversarial_loss(encode_discriminator(encode_fake_B.detach()), encode_fake)
decode_real_loss = adversarial_loss(discriminator(decode_real_B), valid)
decode_fake_loss = adversarial_loss(discriminator(decode_fake_B.detach()), fake)
encode_d_loss = (encode_real_loss + encode_fake_loss) / 2
decode_d_loss = (decode_real_loss + decode_fake_loss) / 2
d_loss = encode_d_loss + decode_d_loss
d_loss.backward()
optimizer_D.step()
# ---------------------------------------
# Evaluate Performance on target domain
# ---------------------------------------
# Evaluate performance on translated Domain A
acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
task_performance.append(acc)
if len(task_performance) > 100:
task_performance.pop(0)
# Evaluate performance on Domain B
pred_B = classifier(imgs_B)
target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
target_performance.append(target_acc)
if len(target_performance) > 100:
target_performance.pop(0)
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
(epoch, opt.n_epochs,
i, len(dataloader_A),
d_loss.item(), g_loss.item(),
100*acc, 100*np.mean(task_performance),
100*target_acc, 100*np.mean(target_performance)))
batches_done = len(dataloader_A) * epoch + i
if batches_done % opt.sample_interval == 0:
sample = torch.cat((imgs_A.data[:5], decode_fake_B.data[:5], imgs_B.data[:5]), -2)
save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
| [
"torch.nn.Linear",
"torch.nn.functional.sigmoid",
"torch.cat",
"torch.nn.MSELoss",
"torch.nn.Softmax",
"torch.nn.init.constant_",
"torch.nn.Tanh",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.nn.init.normal_",
"torch.cuda.is_available",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.CrossEntropyLoss"
] | 0.4.0 | Napkin-DL/PyTorch-GAN | 4668fb434a74a4e4771631944e4abfb0ec1c8795 |
1.7 | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ expert.py ]
# Synopsis [ the phone linear downstream wrapper ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import math
import torch
import random
import pathlib
#-------------#
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, DistributedSampler
from torch.distributed import is_initialized
from torch.nn.utils.rnn import pad_sequence
#-------------#
from ..model import *
from .dataset import SpeakerClassifiDataset
from argparse import Namespace
from pathlib import Path
class DownstreamExpert(nn.Module):
"""
Used to handle downstream-specific operations
eg. downstream forward, metric computation, contents to log
"""
def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
super(DownstreamExpert, self).__init__()
self.upstream_dim = upstream_dim
self.downstream = downstream_expert
self.datarc = downstream_expert['datarc']
self.modelrc = downstream_expert['modelrc']
root_dir = Path(self.datarc['file_path'])
self.train_dataset = SpeakerClassifiDataset('train', root_dir, self.datarc['meta_data'], self.datarc['max_timestep'])
self.dev_dataset = SpeakerClassifiDataset('dev', root_dir, self.datarc['meta_data'])
self.test_dataset = SpeakerClassifiDataset('test', root_dir, self.datarc['meta_data'])
model_cls = eval(self.modelrc['select'])
model_conf = self.modelrc.get(self.modelrc['select'], {})
self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
self.model = model_cls(
input_dim = self.modelrc['projector_dim'],
output_dim = self.train_dataset.speaker_num,
**model_conf,
)
self.objective = nn.CrossEntropyLoss()
self.logging = os.path.join(expdir, 'log.log')
self.register_buffer('best_score', torch.zeros(1))
def _get_train_dataloader(self, dataset):
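        # Use a DistributedSampler when torch.distributed has been initialized; otherwise let the DataLoader shuffle.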
sampler = DistributedSampler(dataset) if is_initialized() else None
return DataLoader(
dataset, batch_size=self.datarc['train_batch_size'],
shuffle=(sampler is None), sampler=sampler,
num_workers=self.datarc['num_workers'],
collate_fn=dataset.collate_fn
)
def _get_eval_dataloader(self, dataset):
return DataLoader(
dataset, batch_size=self.datarc['eval_batch_size'],
shuffle=False, num_workers=self.datarc['num_workers'],
collate_fn=dataset.collate_fn
)
def get_train_dataloader(self):
return self._get_train_dataloader(self.train_dataset)
def get_dev_dataloader(self):
return self._get_eval_dataloader(self.dev_dataset)
def get_test_dataloader(self):
return self._get_eval_dataloader(self.test_dataset)
# Interface
def get_dataloader(self, mode):
return eval(f'self.get_{mode}_dataloader')()
# Interface
def forward(self, mode, features, labels, records, **kwargs):
device = features[0].device
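        # Record the true length of each utterance before zero-padding; both the padded batch and the lengths are passed to the model.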
features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
features = pad_sequence(features, batch_first=True)
features = self.projector(features)
predicted, _ = self.model(features, features_len)
labels = torch.LongTensor(labels).to(features.device)
loss = self.objective(predicted, labels)
predicted_classid = predicted.max(dim=-1).indices
records['acc'] += (predicted_classid == labels).view(-1).cpu().float().tolist()
records['loss'].append(loss.item())
return loss
# interface
def log_records(self, mode, records, logger, global_step, **kwargs):
save_names = []
for key, values in records.items():
average = torch.FloatTensor(values).mean().item()
logger.add_scalar(
f'voxceleb1/{mode}-{key}',
average,
global_step=global_step
)
with open(self.logging, 'a') as f:
if key == 'acc':
f.write(f'{mode} at step {global_step}: {average}\n')
if mode == 'dev' and average > self.best_score:
self.best_score = torch.ones(1) * average
f.write(f'New best on {mode} at step {global_step}: {average}\n')
save_names.append(f'{mode}-best.ckpt')
return save_names
| [
"torch.nn.Linear",
"torch.zeros",
"torch.utils.data.DistributedSampler",
"torch.nn.utils.rnn.pad_sequence",
"torch.FloatTensor",
"torch.ones",
"torch.distributed.is_initialized",
"torch.LongTensor",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss"
] | 1.7.0 | andybi7676/s3prl | 0e5acc5d499a629f946d561d87e8924ba3eb004b |
1.0 | from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.lr_scheduler import CosineAnnealingLR
class GradualWarmupScheduler(_LRScheduler):
""" Gradually warm-up(increasing) learning rate in optimizer.
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Args:
optimizer (Optimizer): Wrapped optimizer.
multiplier: target learning rate = base lr * multiplier
warmup_epoch: target learning rate is linearly reached at the warmup_epoch
scheduler: scheduler used after warmup_epoch (eg. ReduceLROnPlateau)
"""
def __init__(self, optimizer, warmup_epoch, multiplier=1.0, scheduler=None):
assert multiplier > 1., 'multiplier should be greater than 1.'
self.multiplier = multiplier
self.warmup_epoch = warmup_epoch
self.scheduler = scheduler
self.finish_warmup = False
super().__init__(optimizer)
def get_lr(self):
if self.last_epoch > self.warmup_epoch:
if self.scheduler:
if not self.finish_warmup:
self.scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
self.finish_warmup = True
return self.scheduler.get_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
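        # Warm-up phase: linearly ramp from base_lr at epoch 0 to base_lr * multiplier at warmup_epoch.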
return [base_lr*((self.multiplier-1.)*self.last_epoch/self.warmup_epoch+1.) for base_lr in self.base_lrs]
def step(self, epoch=None, metrics=None):
if self.finish_warmup and self.scheduler:
if epoch is None:
self.scheduler.step(None)
else:
self.scheduler.step(epoch - self.warmup_epoch)
else:
return super(GradualWarmupScheduler, self).step(epoch)
if __name__ == '__main__':
import torch
v = torch.zeros(10, requires_grad=True)
optim = torch.optim.SGD([v], lr=0.01)
scheduler = CosineAnnealingLR(optim, 95)
scheduler = GradualWarmupScheduler(optim, multiplier=10, warmup_epoch=5, scheduler=scheduler)
for epoch in range(0, 100):
scheduler.step(epoch)
print(epoch, optim.param_groups[0]['lr'])
| [
"torch.zeros",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.optim.SGD"
] | 1.0.1 | arielclj/singa-easy | fd4bc601a5501062936f874df14711a3cefa1346 |
1.6 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, List, Optional
import torch
from torch.nn.parallel import DistributedDataParallel
import pytorch_lightning as pl
from pytorch_lightning.overrides.base import unwrap_lightning_module
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.training_type.training_type_plugin import TrainingTypePlugin
from pytorch_lightning.utilities import _XLA_AVAILABLE
from pytorch_lightning.utilities.distributed import all_gather_ddp_if_available, ReduceOp
class ParallelPlugin(TrainingTypePlugin, ABC):
""" Plugin for training with multiple processes in parallel. """
def __init__(
self,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
):
super().__init__()
self.parallel_devices = parallel_devices
self.cluster_environment = cluster_environment
@property
@abstractmethod
def root_device(self) -> torch.device:
raise NotImplementedError
@property
def on_gpu(self) -> bool:
return self.root_device.type == "cuda" and torch.cuda.is_available()
@property
def on_tpu(self) -> bool:
return self.root_device.type == "xla" and _XLA_AVAILABLE
@property
def lightning_module(self):
return unwrap_lightning_module(self._model)
@property
def global_rank(self) -> int:
return self.cluster_environment.global_rank() if self.cluster_environment is not None else 0
@property
def local_rank(self) -> int:
return self.cluster_environment.local_rank() if self.cluster_environment is not None else 0
@property
def node_rank(self) -> int:
return self.cluster_environment.node_rank() if self.cluster_environment is not None else 0
@property
def world_size(self) -> int:
return self.cluster_environment.world_size() if self.cluster_environment is not None else 1
@property
def is_global_zero(self) -> bool:
return self.global_rank == 0
@property
def distributed_sampler_kwargs(self):
distributed_sampler_kwargs = dict(num_replicas=len(self.parallel_devices), rank=self.global_rank)
return distributed_sampler_kwargs
def reconciliate_processes(self, trace: str):
"""
        Function to reconcile processes on failure
"""
def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> torch.Tensor:
"""Perform a all_gather on all processes """
return all_gather_ddp_if_available(tensor, group=group, sync_grads=sync_grads)
def reduce_boolean_decision(self, decision: bool) -> bool:
decision = torch.tensor(int(decision), device=self.lightning_module.device)
decision = self.reduce(decision, reduce_op=ReduceOp.SUM)
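        # The reduced decision equals world_size only if every process voted True, i.e. an all-reduce AND.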
decision = bool(decision == self.world_size)
return decision
@property
def torch_distributed_backend(self):
torch_backend = os.getenv("PL_TORCH_DISTRIBUTED_BACKEND")
if torch_backend is None:
torch_backend = "nccl" if self.on_gpu else "gloo"
return torch_backend
@staticmethod
def configure_sync_batchnorm(model: 'pl.LightningModule') -> 'pl.LightningModule':
"""
Add global batchnorm for a model spread across multiple GPUs and nodes.
Override to synchronize batchnorm between specific process groups instead
of the whole world or use a different sync_bn like `apex`'s version.
Args:
model: pointer to current :class:`LightningModule`.
Return:
LightningModule with batchnorm layers synchronized between process groups
"""
return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
@contextmanager
def block_backward_sync(self):
"""
Blocks ddp sync gradients behaviour on backwards pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead
Returns: context manager with sync behaviour off
"""
if isinstance(self.model, DistributedDataParallel):
with self.model.no_sync():
yield None
else:
yield None
def teardown(self) -> None:
# Un-reference the wrapper if any was used.
# todo (tchaton): Add support for all plugins.
if isinstance(self.model, DistributedDataParallel):
self.model = self.lightning_module
if self.on_gpu:
# GPU teardown
self.lightning_module.cpu()
# clean up memory
torch.cuda.empty_cache()
| [
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm"
] | 1.6 | randommm/pytorch-lightning | 10e87b7b7acbbad8fc12ec5c07638ed093547ef8 |
0.4 | #!/usr/bin/env python
import random
import argparse
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
import torchvision.utils as vutils
import gym
import gym.spaces
import numpy as np
log = gym.logger
log.set_level(gym.logger.INFO)
LATENT_VECTOR_SIZE = 100
DISCR_FILTERS = 64
GENER_FILTERS = 64
BATCH_SIZE = 16
# dimension input image will be rescaled
IMAGE_SIZE = 64
LEARNING_RATE = 0.0001
REPORT_EVERY_ITER = 25
SAVE_IMAGE_EVERY_ITER = 1000
class InputWrapper(gym.ObservationWrapper):
"""
Preprocessing of input numpy array:
1. resize image into predefined size
2. move color channel axis to a first place
"""
def __init__(self, *args):
super(InputWrapper, self).__init__(*args)
assert isinstance(self.observation_space, gym.spaces.Box)
old_space = self.observation_space
self.observation_space = gym.spaces.Box(self.observation(old_space.low), self.observation(old_space.high),
dtype=np.float32)
def observation(self, observation):
# resize image
new_obs = cv2.resize(observation, (IMAGE_SIZE, IMAGE_SIZE))
# transform (210, 160, 3) -> (3, 210, 160)
new_obs = np.moveaxis(new_obs, 2, 0)
return new_obs.astype(np.float32)
class Discriminator(nn.Module):
def __init__(self, input_shape):
super(Discriminator, self).__init__()
# this pipe converges image into the single number
self.conv_pipe = nn.Sequential(
nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS,
kernel_size=4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(DISCR_FILTERS*2),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(DISCR_FILTERS * 4),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(DISCR_FILTERS * 8),
nn.ReLU(),
nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1,
kernel_size=4, stride=1, padding=0),
nn.Sigmoid()
)
def forward(self, x):
conv_out = self.conv_pipe(x)
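        # conv_pipe ends with a 4x4 convolution over a 4x4 feature map, giving a Bx1x1x1 output;
        # flatten it to a vector of per-sample real/fake probabilities.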
return conv_out.view(-1, 1).squeeze(dim=1)
class Generator(nn.Module):
def __init__(self, output_shape):
super(Generator, self).__init__()
# pipe deconvolves input vector into (3, 64, 64) image
self.pipe = nn.Sequential(
nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8,
kernel_size=4, stride=1, padding=0),
nn.BatchNorm2d(GENER_FILTERS * 8),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(GENER_FILTERS * 4),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(GENER_FILTERS * 2),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS,
kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(GENER_FILTERS),
nn.ReLU(),
nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0],
kernel_size=4, stride=2, padding=1),
nn.Tanh()
)
def forward(self, x):
return self.pipe(x)
def iterate_batches(envs, batch_size=BATCH_SIZE):
batch = [e.reset() for e in envs]
env_gen = iter(lambda: random.choice(envs), None)
while True:
e = next(env_gen)
obs, reward, is_done, _ = e.step(e.action_space.sample())
if np.mean(obs) > 0.01:
batch.append(obs)
if len(batch) == batch_size:
                # Normalising input to the range [-1, 1]
batch_np = np.array(batch, dtype=np.float32) * 2.0 / 255.0 - 1.0
yield torch.tensor(batch_np)
batch.clear()
if is_done:
e.reset()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument("--cuda", default=False, action='store_true', help="Enable cuda computation")
parser.add_argument("--cuda", default=True, action='store_true', help="Enable cuda computation")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
envs = [InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')]
input_shape = envs[0].observation_space.shape
net_discr = Discriminator(input_shape=input_shape).to(device)
net_gener = Generator(output_shape=input_shape).to(device)
objective = nn.BCELoss()
gen_optimizer = optim.Adam(params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
dis_optimizer = optim.Adam(params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
writer = SummaryWriter()
gen_losses = []
dis_losses = []
iter_no = 0
true_labels_v = torch.ones(BATCH_SIZE, dtype=torch.float32, device=device)
fake_labels_v = torch.zeros(BATCH_SIZE, dtype=torch.float32, device=device)
for batch_v in iterate_batches(envs):
# generate extra fake samples, input is 4D: batch, filters, x, y
gen_input_v = torch.FloatTensor(BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1).normal_(0, 1).to(device)
batch_v = batch_v.to(device)
gen_output_v = net_gener(gen_input_v)
# train discriminator
dis_optimizer.zero_grad()
dis_output_true_v = net_discr(batch_v)
dis_output_fake_v = net_discr(gen_output_v.detach())
dis_loss = objective(dis_output_true_v, true_labels_v) + objective(dis_output_fake_v, fake_labels_v)
dis_loss.backward()
dis_optimizer.step()
dis_losses.append(dis_loss.item())
# train generator
gen_optimizer.zero_grad()
dis_output_v = net_discr(gen_output_v)
gen_loss_v = objective(dis_output_v, true_labels_v)
gen_loss_v.backward()
gen_optimizer.step()
gen_losses.append(gen_loss_v.item())
iter_no += 1
if iter_no % REPORT_EVERY_ITER == 0:
log.info("Iter %d: gen_loss=%.3e, dis_loss=%.3e", iter_no, np.mean(gen_losses), np.mean(dis_losses))
writer.add_scalar("gen_loss", np.mean(gen_losses), iter_no)
writer.add_scalar("dis_loss", np.mean(dis_losses), iter_no)
gen_losses = []
dis_losses = []
if iter_no % SAVE_IMAGE_EVERY_ITER == 0:
writer.add_image("fake", vutils.make_grid(gen_output_v.data[:64], normalize=True), iter_no)
writer.add_image("real", vutils.make_grid(batch_v.data[:64], normalize=True), iter_no)
| [
"torch.zeros",
"torch.device",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.FloatTensor",
"torch.ones",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.tensor",
"torch.nn.BCELoss"
] | 0.4.1 | Yelloooowww/Deep-Reinforcement-Learning-Hands-On | d1a3a1272d7ceff8796fe412deb4e4d5bd6665a5 |
1.6 | import torch
from .elliptical_slice import EllipticalSliceSampler
class MeanEllipticalSliceSampler(EllipticalSliceSampler):
def __init__(self, f_init, dist, lnpdf, nsamples, pdf_params=()):
"""
Implementation of elliptical slice sampling (Murray, Adams, & Mckay, 2010).
f_init: initial value of `f`
        dist: multivariate normal distribution to sample from
lnpdf: likelihood function
n_samples: number of samples
pdf_params: callable arguments for lnpdf
"""
mean_vector = dist.mean
demeaned_lnpdf = lambda g: lnpdf(g + mean_vector, *pdf_params)
demeaned_init = f_init - mean_vector
samples = dist.sample(sample_shape = torch.Size((nsamples,))).transpose(-1, -2)
demeaned_samples = samples - mean_vector.unsqueeze(1)
super(MeanEllipticalSliceSampler, self).__init__(demeaned_init, demeaned_samples, demeaned_lnpdf, nsamples, pdf_params=())
self.mean_vector = mean_vector
def run(self):
self.f_sampled, self.ell = super().run()
#add means back into f_sampled
self.f_sampled = self.f_sampled + self.mean_vector.unsqueeze(1)
return self.f_sampled, self.ell | [
"torch.Size"
] | 1.6.0 | wjmaddox/pytorch_ess | 8e189666ce7381cf760666464384c634abbc4be2 |
1.2 | """
Implement input sentence encoder.
"""
import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
from .config import *
from common.constants import DEVICE
from util.tensor_utils import to_sorted_tensor, to_original_tensor
class Encoder(nn.Module):
"""
Transform embeddings to encoding representations.
"""
def __init__(self, config, input_size, dropout=0.1):
"""
Initialize a GRU encoder.
:param config: configuration, includes total enc size, is bi-direction, etc.
:param input_size: input dimension.
:param dropout: dropout rate for GRU
"""
super(Encoder, self).__init__()
self.config = config
self.layers = config.layers
self.num_directions = 2 if config.brnn else 1
assert config.enc_rnn_size % self.num_directions == 0
self.hidden_size = config.enc_rnn_size // self.num_directions
self.rnn = nn.GRU(
input_size, self.hidden_size,
num_layers=config.layers, dropout=config.dropout,
bidirectional=config.brnn, batch_first=True)
def forward(self, input_emb, lengths, hidden=None):
"""
Given input embeddings and input seq lengths, calculate encoding representations.
:param input_emb: embedding of a batch.
Input shape - [seq_len, batch_size, hidden_dim]
:param lengths: lengths of each sample.
:param hidden: hidden of previous layer. Default None.
:return: encoding of a batch.
            Output shape - [max unpadded seq_len in this batch, batch_size, hidden_dim * num_directions]
TODO: revise code to make input and output shape be [batch, length, dim]
"""
# input_emb shape: [seq_len, batch_size, hidden_dim] [100, 32, 412]
# sorted_emb shape: [seq_len, batch_size, hidden_dim] [100, 32, 412]
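        # pack_padded_sequence expects sequences sorted by length in descending order, so the batch is sorted
        # here and restored to its original order after unpacking (see to_original_tensor below).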
sorted_input_emb, sorted_lengths, sorted_idx = to_sorted_tensor(
input_emb, lengths, sort_dim=1, device=DEVICE)
emb = pack(sorted_input_emb, sorted_lengths, batch_first=False)
self.rnn.flatten_parameters()
outputs, hidden_t = self.rnn(emb, hidden)
        # hidden_t shape: [num_layers * num_directions, batch_size, hidden_dim] [2, 32, 256]
        # outputs shape: [unpadded_seq_len, batch_size, hidden_dim * num_directions] [79, 32, 512]
# !!! NOTICE: it will unpack to max_unpadded_length.
outputs = unpack(outputs, batch_first=False)[0]
outputs = to_original_tensor(
outputs, sorted_idx, sort_dim=1, device=DEVICE)
return hidden_t, outputs
| [
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.GRU",
"torch.nn.utils.rnn.pack_padded_sequence"
] | 1.2.0 | project-delphi/ACS-QG | 03aa5b79030b5ba4c09a99363a58454743876592 |
1.4 | """ Bring-Your-Own-Blocks Network
A flexible network w/ dataclass based config for stacking those NN blocks.
This model is currently used to implement the following networks:
GPU Efficient (ResNets) - gernet_l/m/s (original versions called genet, but this was already used (by SENet author)).
Paper: `Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
Code and weights: https://github.com/idstcv/GPU-Efficient-Networks, licensed Apache 2.0
RepVGG - repvgg_*
Paper: `Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
Code and weights: https://github.com/DingXiaoH/RepVGG, licensed MIT
In all cases the models have been modified to fit within the design of ByobNet. I've remapped
the original weights and verified accuracies.
For GPU Efficient nets, I used the original names for the blocks since they were for the most part
the same as original residual blocks in ResNe(X)t, DarkNet, and other existing models. Note also some
changes introduced in RegNet were also present in the stem and bottleneck blocks for this model.
A significant number of different network archs can be implemented here, including variants of the
above nets that include attention.
Hacked together by / copyright Ross Wightman, 2021.
"""
import math
from dataclasses import dataclass, field, replace
from typing import Tuple, List, Dict, Optional, Union, Any, Callable, Sequence
from functools import partial
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg, named_apply
from .layers import ClassifierHead, ConvBnAct, BatchNormAct2d, DropPath, AvgPool2dSame, \
create_conv2d, get_act_layer, convert_norm_act, get_attn, make_divisible, to_2tuple
from .registry import register_model
__all__ = ['ByobNet', 'ByoModelCfg', 'ByoBlockCfg', 'create_byob_stem', 'create_block']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = {
# GPU-Efficient (ResNet) weights
'gernet_s': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_s-756b4751.pth'),
'gernet_m': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_m-0873c53a.pth'),
'gernet_l': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-ger-weights/gernet_l-f31e2e8d.pth',
input_size=(3, 256, 256), pool_size=(8, 8)),
# RepVGG weights
'repvgg_a2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_a2-c1ee6d2b.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b0': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b0-80ac3f1b.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b1': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1-77ca2989.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b1g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b1g4-abde5d92.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b2': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2-25b7494e.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b2g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b2g4-165a85f2.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b3': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3-199bc50d.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
'repvgg_b3g4': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-repvgg-weights/repvgg_b3g4-73c370bf.pth',
first_conv=('stem.conv_kxk.conv', 'stem.conv_1x1.conv')),
# experimental configs
'resnet51q': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet51q_ra2-d47dcc76.pth',
first_conv='stem.conv1', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), crop_pct=1.0),
'resnet61q': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet61q_ra2-6afc536c.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8),
test_input_size=(3, 288, 288), crop_pct=1.0, interpolation='bicubic'),
'resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnext26ts_256_ra2-8bbd9106.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext26ts_256-e414378b.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'seresnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnext26ts_256-6f0d74a3.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'eca_resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnext26ts_256-5a1d030f.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'bat_resnext26ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/bat_resnext26ts_256-fa6fd595.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic',
min_input_size=(3, 256, 256)),
'resnet32ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet32ts_256-aacf5250.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'resnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet33ts_256-e91b09a4.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet33ts_256-0e0cd345.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'seresnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/seresnet33ts_256-f8ad44d9.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'eca_resnet33ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_resnet33ts_256-8f98face.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnet50t': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnet50t_256-96374d1c.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
'gcresnext50ts': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/gcresnext50ts_256-3e0f515e.pth',
first_conv='stem.conv1.conv', input_size=(3, 256, 256), pool_size=(8, 8), interpolation='bicubic'),
}
@dataclass
class ByoBlockCfg:
type: Union[str, nn.Module]
d: int # block depth (number of block repeats in stage)
c: int # number of output channels for each block in stage
s: int = 2 # stride of stage (first block)
gs: Optional[Union[int, Callable]] = None # group-size of blocks in stage, conv is depthwise if gs == 1
br: float = 1. # bottleneck-ratio of blocks in stage
# NOTE: these config items override the model cfgs that are applied to all blocks by default
attn_layer: Optional[str] = None
attn_kwargs: Optional[Dict[str, Any]] = None
self_attn_layer: Optional[str] = None
self_attn_kwargs: Optional[Dict[str, Any]] = None
block_kwargs: Optional[Dict[str, Any]] = None
@dataclass
class ByoModelCfg:
blocks: Tuple[Union[ByoBlockCfg, Tuple[ByoBlockCfg, ...]], ...]
downsample: str = 'conv1x1'
stem_type: str = '3x3'
stem_pool: Optional[str] = 'maxpool'
stem_chs: int = 32
width_factor: float = 1.0
num_features: int = 0 # num out_channels for final conv, no final 1x1 conv if 0
zero_init_last: bool = True # zero init last weight (usually bn) in residual path
fixed_input_size: bool = False # model constrained to a fixed-input size / img_size must be provided on creation
act_layer: str = 'relu'
norm_layer: str = 'batchnorm'
# NOTE: these config items will be overridden by the block cfg (per-block) if they are set there
attn_layer: Optional[str] = None
attn_kwargs: dict = field(default_factory=lambda: dict())
self_attn_layer: Optional[str] = None
self_attn_kwargs: dict = field(default_factory=lambda: dict())
block_kwargs: Dict[str, Any] = field(default_factory=lambda: dict())
def _rep_vgg_bcfg(d=(4, 6, 16, 1), wf=(1., 1., 1., 1.), groups=0):
c = (64, 128, 256, 512)
group_size = 0
if groups > 0:
group_size = lambda chs, idx: chs // groups if (idx + 1) % 2 == 0 else 0
bcfg = tuple([ByoBlockCfg(type='rep', d=d, c=c * wf, gs=group_size) for d, c, wf in zip(d, c, wf)])
return bcfg
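# Usage sketch (illustrative only): _rep_vgg_bcfg expands per-stage depth and width
# multipliers into ByoBlockCfg tuples. Base channels are fixed at (64, 128, 256, 512)
# and scaled per stage by wf; the values below mirror the repvgg_a2 config further down.
#
#   cfgs = _rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75))
#   # -> four stages of 'rep' blocks with depths 2/4/14/1 and c = 96/192/384/1408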
def interleave_blocks(
types: Tuple[str, str], d, every: Union[int, List[int]] = 1, first: bool = False, **kwargs
) -> Tuple[ByoBlockCfg]:
""" interleave 2 block types in stack
"""
assert len(types) == 2
if isinstance(every, int):
every = list(range(0 if first else every, d, every + 1))
if not every:
every = [d - 1]
    every = set(every)  # use a set for de-duplication and O(1) membership checks below
blocks = []
for i in range(d):
block_type = types[1] if i in every else types[0]
blocks += [ByoBlockCfg(type=block_type, d=1, **kwargs)]
return tuple(blocks)
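# Usage sketch (illustrative only): interleave_blocks alternates two block types within
# a single stage. With an int `every`, every (every + 1)-th block starting at index
# `every` (or 0 if first=True) becomes the second type. The kwargs below are
# hypothetical stage settings, not a config used by this file.
#
#   mixed = interleave_blocks(('bottle', 'self_attn'), d=4, every=2, c=512, gs=32, br=0.25)
#   # -> block types: 'bottle', 'bottle', 'self_attn', 'bottle'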
model_cfgs = dict(
gernet_l=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=5, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=4, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_m=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=128, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=2, c=192, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=6, c=640, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=4, c=640, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=640, s=1, gs=1, br=3.),
),
stem_chs=32,
stem_pool=None,
num_features=2560,
),
gernet_s=ByoModelCfg(
blocks=(
ByoBlockCfg(type='basic', d=1, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='basic', d=3, c=48, s=2, gs=0, br=1.),
ByoBlockCfg(type='bottle', d=7, c=384, s=2, gs=0, br=1 / 4),
ByoBlockCfg(type='bottle', d=2, c=560, s=2, gs=1, br=3.),
ByoBlockCfg(type='bottle', d=1, c=256, s=1, gs=1, br=3.),
),
stem_chs=13,
stem_pool=None,
num_features=1920,
),
repvgg_a2=ByoModelCfg(
blocks=_rep_vgg_bcfg(d=(2, 4, 14, 1), wf=(1.5, 1.5, 1.5, 2.75)),
stem_type='rep',
stem_chs=64,
),
repvgg_b0=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(1., 1., 1., 2.5)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b1g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2., 2., 2., 4.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b2=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b2g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(2.5, 2.5, 2.5, 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
repvgg_b3=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.)),
stem_type='rep',
stem_chs=64,
),
repvgg_b3g4=ByoModelCfg(
blocks=_rep_vgg_bcfg(wf=(3., 3., 3., 5.), groups=4),
stem_type='rep',
stem_chs=64,
),
# 4 x conv stem w/ 2 act, no maxpool, 2,4,6,4 repeats, group size 32 in first 3 blocks
# DW convs in last block, 2048 pre-FC, silu act
resnet51q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad2',
stem_pool=None,
num_features=2048,
act_layer='silu',
),
# 4 x conv stem w/ 4 act, no maxpool, 1,4,6,4 repeats, edge block first, group size 32 in next 2 blocks
# DW convs in last block, 4 conv for each bottle block, 2048 pre-FC, silu act
resnet61q=ByoModelCfg(
blocks=(
ByoBlockCfg(type='edge', d=1, c=256, s=1, gs=0, br=1.0, block_kwargs=dict()),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1536, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=1536, s=2, gs=1, br=1.0),
),
stem_chs=128,
stem_type='quad',
stem_pool=None,
num_features=2048,
act_layer='silu',
block_kwargs=dict(extra_conv=True),
),
# A series of ResNeXt-26 models w/ one of none, GC, SE, ECA, BAT attn, group size 32, SiLU act,
# and a tiered stem w/ maxpool
resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
),
gcresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='gca',
),
seresnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='se',
),
eca_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='eca',
),
bat_resnext26ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
act_layer='silu',
attn_layer='bat',
attn_kwargs=dict(block_size=8)
),
# ResNet-32 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, no pre-fc feat layer, tiered stem w/o maxpool
resnet32ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=0,
act_layer='silu',
),
# ResNet-33 (2, 3, 3, 2) models w/ no attn, no groups, SiLU act, 1280 pre-FC feat, tiered stem w/o maxpool
resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
),
# A series of ResNet-33 (2, 3, 3, 2) models w/ one of GC, SE, ECA attn, no groups, SiLU act, 1280 pre-FC feat
# and a tiered stem w/ no maxpool
gcresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='gca',
),
seresnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='se',
),
eca_resnet33ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=512, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=1536, s=2, gs=0, br=0.25),
ByoBlockCfg(type='bottle', d=2, c=1536, s=2, gs=0, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
num_features=1280,
act_layer='silu',
attn_layer='eca',
),
gcresnet50t=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='',
attn_layer='gca',
),
gcresnext50ts=ByoModelCfg(
blocks=(
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=4, c=512, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=6, c=1024, s=2, gs=32, br=0.25),
ByoBlockCfg(type='bottle', d=3, c=2048, s=2, gs=32, br=0.25),
),
stem_chs=64,
stem_type='tiered',
stem_pool='maxpool',
# stem_pool=None,
act_layer='silu',
attn_layer='gca',
),
)
@register_model
def gernet_l(pretrained=False, **kwargs):
""" GEResNet-Large (GENet-Large from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_l', pretrained=pretrained, **kwargs)
@register_model
def gernet_m(pretrained=False, **kwargs):
""" GEResNet-Medium (GENet-Normal from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_m', pretrained=pretrained, **kwargs)
@register_model
def gernet_s(pretrained=False, **kwargs):
""" EResNet-Small (GENet-Small from official impl)
`Neural Architecture Design for GPU-Efficient Networks` - https://arxiv.org/abs/2006.14090
"""
return _create_byobnet('gernet_s', pretrained=pretrained, **kwargs)
@register_model
def repvgg_a2(pretrained=False, **kwargs):
""" RepVGG-A2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_a2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b0(pretrained=False, **kwargs):
""" RepVGG-B0
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b0', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1(pretrained=False, **kwargs):
""" RepVGG-B1
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b1g4(pretrained=False, **kwargs):
""" RepVGG-B1g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2(pretrained=False, **kwargs):
""" RepVGG-B2
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b2g4(pretrained=False, **kwargs):
""" RepVGG-B2g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b2g4', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3(pretrained=False, **kwargs):
""" RepVGG-B3
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3', pretrained=pretrained, **kwargs)
@register_model
def repvgg_b3g4(pretrained=False, **kwargs):
""" RepVGG-B3g4
`Making VGG-style ConvNets Great Again` - https://arxiv.org/abs/2101.03697
"""
return _create_byobnet('repvgg_b3g4', pretrained=pretrained, **kwargs)
@register_model
def resnet51q(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet51q', pretrained=pretrained, **kwargs)
@register_model
def resnet61q(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet61q', pretrained=pretrained, **kwargs)
@register_model
def resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def seresnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('seresnext26ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('eca_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def bat_resnext26ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('bat_resnext26ts', pretrained=pretrained, **kwargs)
@register_model
def resnet32ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet32ts', pretrained=pretrained, **kwargs)
@register_model
def resnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def seresnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs)
@register_model
def eca_resnet33ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('eca_resnet33ts', pretrained=pretrained, **kwargs)
@register_model
def gcresnet50t(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnet50t', pretrained=pretrained, **kwargs)
@register_model
def gcresnext50ts(pretrained=False, **kwargs):
"""
"""
return _create_byobnet('gcresnext50ts', pretrained=pretrained, **kwargs)
def expand_blocks_cfg(stage_blocks_cfg: Union[ByoBlockCfg, Sequence[ByoBlockCfg]]) -> List[ByoBlockCfg]:
if not isinstance(stage_blocks_cfg, Sequence):
stage_blocks_cfg = (stage_blocks_cfg,)
block_cfgs = []
for i, cfg in enumerate(stage_blocks_cfg):
block_cfgs += [replace(cfg, d=1) for _ in range(cfg.d)]
return block_cfgs
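# Usage sketch (illustrative only): expand_blocks_cfg flattens a per-stage cfg with
# depth d into d single-block cfgs, so the stage builder can iterate one block at a time.
#
#   expand_blocks_cfg(ByoBlockCfg(type='bottle', d=3, c=256))
#   # -> [ByoBlockCfg(type='bottle', d=1, c=256), ...] with 3 entries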
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
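# Usage sketch (illustrative only): group_size is interpreted per block; 0/None means a
# normal conv, 1 means depthwise, otherwise channels are split into channels // group_size groups.
#
#   num_groups(0, 256)   # -> 1   (standard conv)
#   num_groups(1, 256)   # -> 256 (depthwise)
#   num_groups(32, 256)  # -> 8   (grouped conv, 32 channels per group)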
@dataclass
class LayerFn:
conv_norm_act: Callable = ConvBnAct
norm_act: Callable = BatchNormAct2d
act: Callable = nn.ReLU
attn: Optional[Callable] = None
self_attn: Optional[Callable] = None
class DownsampleAvg(nn.Module):
def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn = None):
""" AvgPool Downsampling as in 'D' ResNet variants."""
super(DownsampleAvg, self).__init__()
layers = layers or LayerFn()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act)
def forward(self, x):
return self.conv(self.pool(x))
def create_downsample(downsample_type, layers: LayerFn, **kwargs):
if downsample_type == 'avg':
return DownsampleAvg(**kwargs)
else:
return layers.conv_norm_act(kwargs.pop('in_chs'), kwargs.pop('out_chs'), kernel_size=1, **kwargs)
class BasicBlock(nn.Module):
""" ResNet Basic Block - kxk + kxk
"""
def __init__(
self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), group_size=None, bottle_ratio=1.0,
downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
drop_path_rate=0.):
super(BasicBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_kxk = layers.conv_norm_act(in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0])
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
# residual path
x = self.conv1_kxk(x)
x = self.conv2_kxk(x)
x = self.attn(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class BottleneckBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - kxk - 1x1
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
downsample='avg', attn_last=False, linear_out=False, extra_conv=False, layers: LayerFn = None,
drop_block=None, drop_path_rate=0.):
super(BottleneckBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
if extra_conv:
self.conv2b_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, dilation=dilation[1], groups=groups, drop_block=drop_block)
else:
self.conv2b_kxk = nn.Identity()
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv3_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.conv2b_kxk(x)
x = self.attn(x)
x = self.conv3_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class DarkBlock(nn.Module):
""" DarkNet-like (1x1 + 3x3 w/ stride) block
The GE-Net impl included a 1x1 + 3x3 block in their search space. It was not used in the feature models.
    This block is pretty much a DarkNet block (also DenseNet) hence the name. Neither DarkNet nor DenseNet
    uses strides within the block (external 3x3 or maxpool downsampling is done in front of the block repeats).
    If one does want to use a lot of these blocks w/ stride, I'd recommend using the EdgeBlock (3x3 w/ stride + 1x1)
for more optimal compute.
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='avg', attn_last=True, linear_out=False, layers: LayerFn = None, drop_block=None,
drop_path_rate=0.):
super(DarkBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_kxk = layers.conv_norm_act(
mid_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_kxk.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.attn(x)
x = self.conv2_kxk(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class EdgeBlock(nn.Module):
""" EdgeResidual-like (3x3 + 1x1) block
A two layer block like DarkBlock, but with the order of the 3x3 and 1x1 convs reversed.
    Very similar to the EfficientNet Edge-Residual block, but this block ends with activations, is
intended to be used with either expansion or bottleneck contraction, and can use DW/group/non-grouped convs.
FIXME is there a more common 3x3 + 1x1 conv block to name this after?
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='avg', attn_last=False, linear_out=False, layers: LayerFn = None,
drop_block=None, drop_path_rate=0.):
super(EdgeBlock, self).__init__()
layers = layers or LayerFn()
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_kxk = layers.conv_norm_act(
in_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
self.attn = nn.Identity() if attn_last or layers.attn is None else layers.attn(mid_chs)
self.conv2_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.attn_last = nn.Identity() if not attn_last or layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv2_1x1.bn.weight)
for attn in (self.attn, self.attn_last):
if hasattr(attn, 'reset_parameters'):
attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_kxk(x)
x = self.attn(x)
x = self.conv2_1x1(x)
x = self.attn_last(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
class RepVggBlock(nn.Module):
""" RepVGG Block.
Adapted from impl at https://github.com/DingXiaoH/RepVGG
This version does not currently support the deploy optimization. It is currently fixed in 'train' mode.
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1.0, group_size=None,
downsample='', layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
super(RepVggBlock, self).__init__()
layers = layers or LayerFn()
groups = num_groups(group_size, in_chs)
use_ident = in_chs == out_chs and stride == 1 and dilation[0] == dilation[1]
self.identity = layers.norm_act(out_chs, apply_act=False) if use_ident else None
self.conv_kxk = layers.conv_norm_act(
in_chs, out_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block, apply_act=False)
self.conv_1x1 = layers.conv_norm_act(in_chs, out_chs, 1, stride=stride, groups=groups, apply_act=False)
self.attn = nn.Identity() if layers.attn is None else layers.attn(out_chs)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. and use_ident else nn.Identity()
self.act = layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
# NOTE this init overrides that base model init with specific changes for the block type
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.normal_(m.weight, .1, .1)
nn.init.normal_(m.bias, 0, .1)
if hasattr(self.attn, 'reset_parameters'):
self.attn.reset_parameters()
def forward(self, x):
if self.identity is None:
x = self.conv_1x1(x) + self.conv_kxk(x)
else:
identity = self.identity(x)
x = self.conv_1x1(x) + self.conv_kxk(x)
x = self.drop_path(x) # not in the paper / official impl, experimental
x = x + identity
x = self.attn(x) # no attn in the paper / official impl, experimental
x = self.act(x)
return x
class SelfAttnBlock(nn.Module):
""" ResNet-like Bottleneck Block - 1x1 - optional kxk - self attn - 1x1
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=1, dilation=(1, 1), bottle_ratio=1., group_size=None,
downsample='avg', extra_conv=False, linear_out=False, post_attn_na=True, feat_size=None,
layers: LayerFn = None, drop_block=None, drop_path_rate=0.):
super(SelfAttnBlock, self).__init__()
assert layers is not None
mid_chs = make_divisible(out_chs * bottle_ratio)
groups = num_groups(group_size, mid_chs)
if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]:
self.shortcut = create_downsample(
downsample, in_chs=in_chs, out_chs=out_chs, stride=stride, dilation=dilation[0],
apply_act=False, layers=layers)
else:
self.shortcut = nn.Identity()
self.conv1_1x1 = layers.conv_norm_act(in_chs, mid_chs, 1)
if extra_conv:
self.conv2_kxk = layers.conv_norm_act(
mid_chs, mid_chs, kernel_size, stride=stride, dilation=dilation[0],
groups=groups, drop_block=drop_block)
stride = 1 # striding done via conv if enabled
else:
self.conv2_kxk = nn.Identity()
opt_kwargs = {} if feat_size is None else dict(feat_size=feat_size)
# FIXME need to dilate self attn to have dilated network support, moop moop
self.self_attn = layers.self_attn(mid_chs, stride=stride, **opt_kwargs)
self.post_attn = layers.norm_act(mid_chs) if post_attn_na else nn.Identity()
self.conv3_1x1 = layers.conv_norm_act(mid_chs, out_chs, 1, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.act = nn.Identity() if linear_out else layers.act(inplace=True)
def init_weights(self, zero_init_last: bool = False):
if zero_init_last:
nn.init.zeros_(self.conv3_1x1.bn.weight)
if hasattr(self.self_attn, 'reset_parameters'):
self.self_attn.reset_parameters()
def forward(self, x):
shortcut = self.shortcut(x)
x = self.conv1_1x1(x)
x = self.conv2_kxk(x)
x = self.self_attn(x)
x = self.post_attn(x)
x = self.conv3_1x1(x)
x = self.drop_path(x)
x = self.act(x + shortcut)
return x
_block_registry = dict(
basic=BasicBlock,
bottle=BottleneckBlock,
dark=DarkBlock,
edge=EdgeBlock,
rep=RepVggBlock,
self_attn=SelfAttnBlock,
)
def register_block(block_type:str, block_fn: nn.Module):
_block_registry[block_type] = block_fn
def create_block(block: Union[str, nn.Module], **kwargs):
if isinstance(block, (nn.Module, partial)):
return block(**kwargs)
    assert block in _block_registry, f'Unknown block type ({block})'
return _block_registry[block](**kwargs)
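# Usage sketch (illustrative only): blocks are built by name through the registry above,
# and register_block allows custom types. MyBlock below is a hypothetical nn.Module,
# not something defined in this file.
#
#   block = create_block('basic', in_chs=64, out_chs=64)  # -> BasicBlock instance
#   register_block('my_block', MyBlock)                   # hypothetical custom block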
class Stem(nn.Sequential):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool',
num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn = None):
super().__init__()
assert stride in (2, 4)
layers = layers or LayerFn()
if isinstance(out_chs, (list, tuple)):
num_rep = len(out_chs)
stem_chs = out_chs
else:
stem_chs = [round(out_chs * chs_decay ** i) for i in range(num_rep)][::-1]
self.stride = stride
self.feature_info = [] # track intermediate features
prev_feat = ''
stem_strides = [2] + [1] * (num_rep - 1)
if stride == 4 and not pool:
# set last conv in stack to be strided if stride == 4 and no pooling layer
stem_strides[-1] = 2
num_act = num_rep if num_act is None else num_act
# if num_act < num_rep, first convs in stack won't have bn + act
stem_norm_acts = [False] * (num_rep - num_act) + [True] * num_act
prev_chs = in_chs
curr_stride = 1
for i, (ch, s, na) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):
layer_fn = layers.conv_norm_act if na else create_conv2d
conv_name = f'conv{i + 1}'
if i > 0 and s > 1:
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))
prev_chs = ch
curr_stride *= s
prev_feat = conv_name
if pool and 'max' in pool.lower():
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module('pool', nn.MaxPool2d(3, 2, 1))
curr_stride *= 2
prev_feat = 'pool'
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
assert curr_stride == stride
def create_byob_stem(in_chs, out_chs, stem_type='', pool_type='', feat_prefix='stem', layers: LayerFn = None):
layers = layers or LayerFn()
assert stem_type in ('', 'quad', 'quad2', 'tiered', 'deep', 'rep', '7x7', '3x3')
if 'quad' in stem_type:
# based on NFNet stem, stack of 4 3x3 convs
num_act = 2 if 'quad2' in stem_type else None
stem = Stem(in_chs, out_chs, num_rep=4, num_act=num_act, pool=pool_type, layers=layers)
elif 'tiered' in stem_type:
# 3x3 stack of 3 convs as in my ResNet-T
stem = Stem(in_chs, (3 * out_chs // 8, out_chs // 2, out_chs), pool=pool_type, layers=layers)
elif 'deep' in stem_type:
# 3x3 stack of 3 convs as in ResNet-D
stem = Stem(in_chs, out_chs, num_rep=3, chs_decay=1.0, pool=pool_type, layers=layers)
elif 'rep' in stem_type:
stem = RepVggBlock(in_chs, out_chs, stride=2, layers=layers)
elif '7x7' in stem_type:
# 7x7 stem conv as in ResNet
if pool_type:
stem = Stem(in_chs, out_chs, 7, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 7, stride=2)
else:
# 3x3 stem conv as in RegNet is the default
if pool_type:
stem = Stem(in_chs, out_chs, 3, num_rep=1, pool=pool_type, layers=layers)
else:
stem = layers.conv_norm_act(in_chs, out_chs, 3, stride=2)
if isinstance(stem, Stem):
feature_info = [dict(f, module='.'.join([feat_prefix, f['module']])) for f in stem.feature_info]
else:
feature_info = [dict(num_chs=out_chs, reduction=2, module=feat_prefix)]
return stem, feature_info
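# Usage sketch (illustrative only): stem construction is driven by stem_type. A 'tiered'
# stem with 64 output channels builds a 3-conv stack with channels (24, 32, 64),
# optionally followed by a maxpool.
#
#   stem, stem_feat = create_byob_stem(3, 64, stem_type='tiered', pool_type='maxpool')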
def reduce_feat_size(feat_size, stride=2):
return None if feat_size is None else tuple([s // stride for s in feat_size])
def override_kwargs(block_kwargs, model_kwargs):
""" Override model level attn/self-attn/block kwargs w/ block level
NOTE: kwargs are NOT merged across levels, block_kwargs will fully replace model_kwargs
for the block if set to anything that isn't None.
i.e. an empty block_kwargs dict will remove kwargs set at model level for that block
"""
out_kwargs = block_kwargs if block_kwargs is not None else model_kwargs
return out_kwargs or {} # make sure None isn't returned
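# Usage sketch (illustrative only): block-level kwargs fully replace model-level kwargs
# when set, even when set to an empty dict; 'rd_ratio' is just an example attention kwarg.
#
#   override_kwargs(None, {'rd_ratio': 0.25})               # -> {'rd_ratio': 0.25}
#   override_kwargs({}, {'rd_ratio': 0.25})                 # -> {}
#   override_kwargs({'rd_ratio': 0.5}, {'rd_ratio': 0.25})  # -> {'rd_ratio': 0.5}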
def update_block_kwargs(block_kwargs: Dict[str, Any], block_cfg: ByoBlockCfg, model_cfg: ByoModelCfg, ):
layer_fns = block_kwargs['layers']
# override attn layer / args with block local config
attn_set = block_cfg.attn_layer is not None
if attn_set or block_cfg.attn_kwargs is not None:
# override attn layer config
if attn_set and not block_cfg.attn_layer:
# empty string for attn_layer type will disable attn for this block
attn_layer = None
else:
attn_kwargs = override_kwargs(block_cfg.attn_kwargs, model_cfg.attn_kwargs)
attn_layer = block_cfg.attn_layer or model_cfg.attn_layer
attn_layer = partial(get_attn(attn_layer), **attn_kwargs) if attn_layer is not None else None
layer_fns = replace(layer_fns, attn=attn_layer)
# override self-attn layer / args with block local cfg
self_attn_set = block_cfg.self_attn_layer is not None
if self_attn_set or block_cfg.self_attn_kwargs is not None:
# override attn layer config
if self_attn_set and not block_cfg.self_attn_layer: # attn_layer == ''
# empty string for self_attn_layer type will disable attn for this block
self_attn_layer = None
else:
self_attn_kwargs = override_kwargs(block_cfg.self_attn_kwargs, model_cfg.self_attn_kwargs)
self_attn_layer = block_cfg.self_attn_layer or model_cfg.self_attn_layer
self_attn_layer = partial(get_attn(self_attn_layer), **self_attn_kwargs) \
if self_attn_layer is not None else None
layer_fns = replace(layer_fns, self_attn=self_attn_layer)
block_kwargs['layers'] = layer_fns
# add additional block_kwargs specified in block_cfg or model_cfg, precedence to block if set
block_kwargs.update(override_kwargs(block_cfg.block_kwargs, model_cfg.block_kwargs))
def create_byob_stages(
cfg: ByoModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any],
feat_size: Optional[int] = None,
layers: Optional[LayerFn] = None,
block_kwargs_fn: Optional[Callable] = update_block_kwargs):
layers = layers or LayerFn()
feature_info = []
block_cfgs = [expand_blocks_cfg(s) for s in cfg.blocks]
depths = [sum([bc.d for bc in stage_bcs]) for stage_bcs in block_cfgs]
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
dilation = 1
net_stride = stem_feat['reduction']
prev_chs = stem_feat['num_chs']
prev_feat = stem_feat
stages = []
for stage_idx, stage_block_cfgs in enumerate(block_cfgs):
stride = stage_block_cfgs[0].s
if stride != 1 and prev_feat:
feature_info.append(prev_feat)
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
blocks = []
for block_idx, block_cfg in enumerate(stage_block_cfgs):
out_chs = make_divisible(block_cfg.c * cfg.width_factor)
group_size = block_cfg.gs
if isinstance(group_size, Callable):
group_size = group_size(out_chs, block_idx)
block_kwargs = dict( # Blocks used in this model must accept these arguments
in_chs=prev_chs,
out_chs=out_chs,
stride=stride if block_idx == 0 else 1,
dilation=(first_dilation, dilation),
group_size=group_size,
bottle_ratio=block_cfg.br,
downsample=cfg.downsample,
drop_path_rate=dpr[stage_idx][block_idx],
layers=layers,
)
if block_cfg.type in ('self_attn',):
# add feat_size arg for blocks that support/need it
block_kwargs['feat_size'] = feat_size
block_kwargs_fn(block_kwargs, block_cfg=block_cfg, model_cfg=cfg)
blocks += [create_block(block_cfg.type, **block_kwargs)]
first_dilation = dilation
prev_chs = out_chs
if stride > 1 and block_idx == 0:
feat_size = reduce_feat_size(feat_size, stride)
stages += [nn.Sequential(*blocks)]
prev_feat = dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')
feature_info.append(prev_feat)
return nn.Sequential(*stages), feature_info
def get_layer_fns(cfg: ByoModelCfg):
act = get_act_layer(cfg.act_layer)
norm_act = convert_norm_act(norm_layer=cfg.norm_layer, act_layer=act)
conv_norm_act = partial(ConvBnAct, norm_layer=cfg.norm_layer, act_layer=act)
attn = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None
self_attn = partial(get_attn(cfg.self_attn_layer), **cfg.self_attn_kwargs) if cfg.self_attn_layer else None
layer_fn = LayerFn(conv_norm_act=conv_norm_act, norm_act=norm_act, act=act, attn=attn, self_attn=self_attn)
return layer_fn
class ByobNet(nn.Module):
""" 'Bring-your-own-blocks' Net
A flexible network backbone that allows building model stem + blocks via
dataclass cfg definition w/ factory functions for module instantiation.
Current assumption is that both stem and blocks are in conv-bn-act order (w/ block ending in act).
"""
def __init__(self, cfg: ByoModelCfg, num_classes=1000, in_chans=3, global_pool='avg', output_stride=32,
zero_init_last=True, img_size=None, drop_rate=0., drop_path_rate=0.):
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
layers = get_layer_fns(cfg)
if cfg.fixed_input_size:
assert img_size is not None, 'img_size argument is required for fixed input size model'
feat_size = to_2tuple(img_size) if img_size is not None else None
self.feature_info = []
stem_chs = int(round((cfg.stem_chs or cfg.blocks[0].c) * cfg.width_factor))
self.stem, stem_feat = create_byob_stem(in_chans, stem_chs, cfg.stem_type, cfg.stem_pool, layers=layers)
self.feature_info.extend(stem_feat[:-1])
feat_size = reduce_feat_size(feat_size, stride=stem_feat[-1]['reduction'])
self.stages, stage_feat = create_byob_stages(
cfg, drop_path_rate, output_stride, stem_feat[-1], layers=layers, feat_size=feat_size)
self.feature_info.extend(stage_feat[:-1])
prev_chs = stage_feat[-1]['num_chs']
if cfg.num_features:
self.num_features = int(round(cfg.width_factor * cfg.num_features))
self.final_conv = layers.conv_norm_act(prev_chs, self.num_features, 1)
else:
self.num_features = prev_chs
self.final_conv = nn.Identity()
self.feature_info += [
dict(num_chs=self.num_features, reduction=stage_feat[-1]['reduction'], module='final_conv')]
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
# init weights
named_apply(partial(_init_weights, zero_init_last=zero_init_last), self)
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.final_conv(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def _init_weights(module, name='', zero_init_last=False):
if isinstance(module, nn.Conv2d):
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, mean=0.0, std=0.01)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights(zero_init_last=zero_init_last)
def _create_byobnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
default_cfg=default_cfgs[variant],
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs)
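# Usage sketch (illustrative only): every @register_model function above funnels into
# _create_byobnet, so a variant can be instantiated directly by name; num_classes here
# is an arbitrary example value.
#
#   model = gernet_s(pretrained=False, num_classes=10)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 10)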
| [
"torch.nn.Identity",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.init.ones_",
"torch.nn.init.normal_",
"torch.nn.init.zeros_"
] | 1.4.0 | KnockerPulsar/pytorch-image-models | 893f5dde27ae6b17389f738bd6e37160e2868c72 |
1.8 | # -*- coding: utf-8 -*-
# @Time : 6/10/21 5:04 PM
# @Author : Yuan Gong
# @Affiliation : Massachusetts Institute of Technology
# @Email : [email protected]
# @File : ast_models.py
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
import wget
os.environ['TORCH_HOME'] = '../../pretrained_models'
import timm
from timm.models.layers import to_2tuple, trunc_normal_
# override the timm package to relax the input shape constraint.
class PatchEmbed(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = self.proj(x).flatten(2).transpose(1, 2)
return x
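# Usage sketch (illustrative only): because this override drops timm's fixed img_size
# check, the projection accepts arbitrary spatial sizes. For a 1-channel 128x1024
# spectrogram and 16x16 patches it yields (128/16)*(1024/16) = 512 tokens.
#
#   pe = PatchEmbed(in_chans=1, embed_dim=768)
#   tokens = pe(torch.randn(2, 1, 128, 1024))  # -> shape (2, 512, 768)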
class ASTModel(nn.Module):
"""
The AST model.
    :param label_dim: the label dimension, i.e., the total number of classes; it is 527 for AudioSet, 50 for ESC-50, and 35 for speechcommands v2-35
    :param fstride: the stride of patch splitting on the frequency dimension; for 16*16 patches, fstride=16 means no overlap, fstride=10 means an overlap of 6
    :param tstride: the stride of patch splitting on the time dimension; for 16*16 patches, tstride=16 means no overlap, tstride=10 means an overlap of 6
:param input_fdim: the number of frequency bins of the input spectrogram
:param input_tdim: the number of time frames of the input spectrogram
:param imagenet_pretrain: if use ImageNet pretrained model
:param audioset_pretrain: if use full AudioSet and ImageNet pretrained model
    :param model_size: the model size of AST, should be in [tiny224, small224, base224, base384]; base224 and base384 are the same model, but are trained differently during ImageNet pretraining.
"""
def __init__(self, label_dim=3, fstride=10, tstride=10, input_fdim=128, input_tdim=1024, imagenet_pretrain=True,
audioset_pretrain=True, model_size='base384', verbose=True):
super(ASTModel, self).__init__()
assert timm.__version__ == '0.4.5', 'Please use timm == 0.4.5, the code might not be compatible with newer versions.'
if verbose == True:
print('---------------AST Model Summary---------------')
print('ImageNet pretraining: {:s}, AudioSet pretraining: {:s}'.format(str(imagenet_pretrain),
str(audioset_pretrain)))
# override timm input shape restriction
timm.models.vision_transformer.PatchEmbed = PatchEmbed
# if AudioSet pretraining is not used (but ImageNet pretraining may still apply)
if audioset_pretrain == False:
if model_size == 'tiny224':
self.v = timm.create_model('vit_deit_tiny_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'small224':
self.v = timm.create_model('vit_deit_small_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'base224':
self.v = timm.create_model('vit_deit_base_distilled_patch16_224', pretrained=imagenet_pretrain)
elif model_size == 'base384':
self.v = timm.create_model('vit_deit_base_distilled_patch16_384', pretrained=imagenet_pretrain)
else:
raise Exception('Model size must be one of tiny224, small224, base224, base384.')
self.original_num_patches = self.v.patch_embed.num_patches
self.oringal_hw = int(self.original_num_patches ** 0.5)
self.original_embedding_dim = self.v.pos_embed.shape[2]
self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim),
nn.Linear(self.original_embedding_dim, label_dim))
            # automatically get the intermediate shape
f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)
num_patches = f_dim * t_dim
self.v.patch_embed.num_patches = num_patches
if verbose == True:
                print('frequency stride={:d}, time stride={:d}'.format(fstride, tstride))
print('number of patches={:d}'.format(num_patches))
# the linear projection layer
new_proj = torch.nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))
if imagenet_pretrain == True:
new_proj.weight = torch.nn.Parameter(torch.sum(self.v.patch_embed.proj.weight, dim=1).unsqueeze(1))
new_proj.bias = self.v.patch_embed.proj.bias
self.v.patch_embed.proj = new_proj
# the positional embedding
if imagenet_pretrain == True:
# get the positional embedding from deit model, skip the first two tokens (cls token and distillation token), reshape it to original 2D shape (24*24).
new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, self.original_num_patches,
self.original_embedding_dim).transpose(1,
2).reshape(
1, self.original_embedding_dim, self.oringal_hw, self.oringal_hw)
# cut (from middle) or interpolate the second dimension of the positional embedding
if t_dim <= self.oringal_hw:
new_pos_embed = new_pos_embed[:, :, :,
int(self.oringal_hw / 2) - int(t_dim / 2): int(self.oringal_hw / 2) - int(
t_dim / 2) + t_dim]
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(self.oringal_hw, t_dim),
mode='bilinear')
# cut (from middle) or interpolate the first dimension of the positional embedding
if f_dim <= self.oringal_hw:
new_pos_embed = new_pos_embed[:, :,
int(self.oringal_hw / 2) - int(f_dim / 2): int(self.oringal_hw / 2) - int(
f_dim / 2) + f_dim, :]
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(f_dim, t_dim), mode='bilinear')
# flatten the positional embedding
new_pos_embed = new_pos_embed.reshape(1, self.original_embedding_dim, num_patches).transpose(1, 2)
# concatenate the above positional embedding with the cls token and distillation token of the deit model.
self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))
else:
# if not use imagenet pretrained model, just randomly initialize a learnable positional embedding
# TODO can use sinusoidal positional embedding instead
new_pos_embed = nn.Parameter(
torch.zeros(1, self.v.patch_embed.num_patches + 2, self.original_embedding_dim))
self.v.pos_embed = new_pos_embed
trunc_normal_(self.v.pos_embed, std=.02)
# now load a model that is pretrained on both ImageNet and AudioSet
elif audioset_pretrain == True:
if audioset_pretrain == True and imagenet_pretrain == False:
raise ValueError(
'currently model pretrained on only audioset is not supported, please set imagenet_pretrain = True to use audioset pretrained model.')
if model_size != 'base384':
raise ValueError('currently only has base384 AudioSet pretrained model.')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if os.path.exists('../../pretrained_models/audioset_10_10_0.4593.pth') == False:
# this model performs 0.4593 mAP on the audioset eval set
audioset_mdl_url = 'https://www.dropbox.com/s/cv4knew8mvbrnvq/audioset_0.4593.pth?dl=1'
wget.download(audioset_mdl_url, out='../../pretrained_models/audioset_10_10_0.4593.pth')
sd = torch.load('../../pretrained_models/audioset_10_10_0.4593.pth', map_location=device)
# sd = torch.load('../../pretrained_models/ast_audioset.pth', map_location=device)
audio_model = ASTModel(label_dim=527, fstride=10, tstride=10, input_fdim=128, input_tdim=1024,
imagenet_pretrain=False, audioset_pretrain=False, model_size='base384',
verbose=False)
audio_model = torch.nn.DataParallel(audio_model)
print("***************USING=>", torch.cuda.current_device())
audio_model.load_state_dict(sd, strict=False)
self.v = audio_model.module.v
self.original_embedding_dim = self.v.pos_embed.shape[2]
self.mlp_head = nn.Sequential(nn.LayerNorm(self.original_embedding_dim),
nn.Linear(self.original_embedding_dim, label_dim))
f_dim, t_dim = self.get_shape(fstride, tstride, input_fdim, input_tdim)
num_patches = f_dim * t_dim
self.v.patch_embed.num_patches = num_patches
if verbose == True:
                print('frequency stride={:d}, time stride={:d}'.format(fstride, tstride))
print('number of patches={:d}'.format(num_patches))
new_pos_embed = self.v.pos_embed[:, 2:, :].detach().reshape(1, 1212, 768).transpose(1, 2).reshape(1, 768,
12, 101)
            # if the input sequence length is shorter than the original AudioSet length (10s -> t_dim=101), cut the positional embedding from the middle
if t_dim < 101:
new_pos_embed = new_pos_embed[:, :, :, 50 - int(t_dim / 2): 50 - int(t_dim / 2) + t_dim]
# otherwise interpolate
else:
new_pos_embed = torch.nn.functional.interpolate(new_pos_embed, size=(12, t_dim), mode='bilinear')
print("NEW POST EMBED:", new_pos_embed.shape)
new_pos_embed = new_pos_embed.reshape(1, 768, num_patches).transpose(1, 2)
print("NEW POST EMBED:", new_pos_embed.shape)
self.v.pos_embed = nn.Parameter(torch.cat([self.v.pos_embed[:, :2, :].detach(), new_pos_embed], dim=1))
def get_shape(self, fstride, tstride, input_fdim=128, input_tdim=1024):
test_input = torch.randn(1, 1, input_fdim, input_tdim)
test_proj = nn.Conv2d(1, self.original_embedding_dim, kernel_size=(16, 16), stride=(fstride, tstride))
test_out = test_proj(test_input)
f_dim = test_out.shape[2]
t_dim = test_out.shape[3]
return f_dim, t_dim
@autocast()
def forward(self, x):
"""
:param x: the input spectrogram, expected shape: (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
:return: prediction
"""
# expect input x = (batch_size, time_frame_num, frequency_bins), e.g., (12, 1024, 128)
x = x.unsqueeze(1)
x = x.transpose(2, 3)
B = x.shape[0]
x = self.v.patch_embed(x)
cls_tokens = self.v.cls_token.expand(B, -1, -1)
dist_token = self.v.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
x = x + self.v.pos_embed
x = self.v.pos_drop(x)
for blk in self.v.blocks:
x = blk(x)
x = self.v.norm(x)
x = (x[:, 0] + x[:, 1]) / 2
# x = self.mlp_head(x)
return x
# if __name__ == '__main__':
# input_tdim = 100
# ast_mdl = ASTModel(input_tdim=input_tdim)
# # input a batch of 10 spectrogram, each with 100 time frames and 128 frequency bins
# test_input = torch.rand([10, input_tdim, 128])
# test_output = ast_mdl(test_input)
# # output should be in shape [10, 527], i.e., 10 samples, each with prediction of 527 classes.
# print(test_output.shape)
#
# input_tdim = 512
# ast_mdl = ASTModel(input_tdim=input_tdim, label_dim=50, audioset_pretrain=True)
# # input a batch of 10 spectrogram, each with 512 time frames and 128 frequency bins
# test_input = torch.rand([10, input_tdim, 128])
# test_output = ast_mdl(test_input)
# # output should be in shape [10, 527], i.e., 10 samples, each with prediction of 527 classes.
# print(test_output.shape)
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.LayerNorm",
"torch.cuda.amp.autocast",
"torch.nn.functional.interpolate",
"torch.cuda.current_device",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.load",
"torch.randn",
"torch.nn.DataParallel",
"torch.sum"
] | 1.8.1 | jvel07/ast | 600e7cf952ec59ac9cc1bb3170d3da7578e1f384 |
1.7 | import torch
from ..utils.stream import ItemFeature
from .base_infocpler import BaseInfoCpler
class OCRVQAInfoCpler(BaseInfoCpler):
def __init__(self, cfg):
super().__init__(cfg)
def complete_info(self, item_feature: ItemFeature):
tokens = self.tokenizer.tokenize(item_feature.question.strip())
tokens = self.tokenizer.get_limited_tokens(tokens, self.max_seq_length - 2)
tokens, input_lm_label_ids = self.tokenizer.random_mask_tokens(tokens, self.word_mask_ratio)
tokens = [self._CLS_TOKEN] + tokens + [self._SEP_TOEKN]
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(tokens)
input_segment = [0] * len(tokens)
input_lm_label_ids = [-1] * len(tokens)
# while len(input_ids) < self.max_seq_length:
# input_ids.append(int(self.pad_idx))
# input_mask.append(0)
# input_segment.append(0)
# input_lm_label_ids.append(-1)
to_extd_length = self.max_seq_length - len(input_ids)
self.info_extend(to_extd_length, (input_ids, int(self.pad_idx)), (input_mask, 0), (input_segment, 0),
(input_lm_label_ids, -1))
# ocr vectors
ocr_tokens = self.tokenizer.get_limited_tokens(item_feature.ocr_tokens, self.max_ocr_length)
item_feature.ocr_vectors_glove = self.get_tokens_glove_vectors(ocr_tokens)
item_feature.ocr_vectors_order = self.get_tokens_order_vectors(ocr_tokens)
item_feature.ocr_vectors_phoc = self.get_tokens_phoc_vectors(ocr_tokens)
item_feature.ocr_vectors_fasttext = self.get_tokens_fasttext_vectors(ocr_tokens)
# ocr features and bboxes
features_ocr = torch.zeros(
(self.max_ocr_length,
item_feature.features_ocr.shape[1] if item_feature.features_ocr is not None else 2048),
dtype=torch.float)
bbox_ocr_normalized = torch.zeros(
(self.max_ocr_length,
item_feature.ocr_normalized_boxes.shape[1] if item_feature.ocr_normalized_boxes is not None else 4),
dtype=torch.float)
if item_feature.features_ocr is not None:
limit = min(self.max_ocr_length, len(item_feature.features_ocr))
features_ocr[:limit] = torch.tensor(item_feature.features_ocr[:limit])
bbox_ocr_normalized[:limit] = torch.tensor(item_feature.ocr_normalized_boxes[:limit])
item_feature.features_ocr = features_ocr
item_feature.ocr_normalized_boxes = bbox_ocr_normalized
# features and bboxes
img_h = item_feature.image_height
img_w = item_feature.image_width
item_feature.bbox = self._get_bbox_from_normalized(item_feature.obj_normalized_boxes, img_h, img_w)
item_feature.bbox_normalized = item_feature.obj_normalized_boxes
item_feature.bbox_ocr = self._get_bbox_from_normalized(item_feature.ocr_normalized_boxes, img_h, img_w)
item_feature.bbox_ocr_normalized = item_feature.ocr_normalized_boxes
item_feature.input_ids = torch.tensor(input_ids, dtype=torch.long)
item_feature.input_mask = torch.tensor(input_mask, dtype=torch.int)
item_feature.input_segment = torch.tensor(input_segment, dtype=torch.int)
item_feature.input_lm_label_ids = torch.tensor(input_lm_label_ids, dtype=torch.long)
item_feature.qa_ids = [self.qa_ans2id[ans] for ans in item_feature.answers if ans in self.qa_ans2id]
# item_feature.qa_allids = [self.qa_ans2id[ans] for ans in item_feature.all_answers if ans in self.qa_ans2id]
item_feature.answers_scores = self.compute_answers_scores(torch.Tensor(item_feature.qa_ids))
return item_feature
| [
"torch.zeros",
"torch.Tensor",
"torch.tensor"
] | 1.7.1 | linxi1158/iMIX | 99898de97ef8b45462ca1d6bf2542e423a73d769 |
1.7 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.weight_norm import weight_norm
from ..builder import BACKBONES
@BACKBONES.register_module()
class CAGRAPH_BACKBONE(nn.Module):
def __init__(self, rnn_type, nlayers, ninp, nhid, dropout):
super().__init__()
self.d = dropout
self.ninp = ninp
self.nhid = nhid
self.nlayers = nlayers
self.rnn_type = rnn_type
self.neighbourhood_size = 8
self.Wq_1 = nn.Linear(self.nhid, self.nhid) # attention
self.Wh_1 = nn.Linear(self.nhid, self.nhid)
self.Wa_1 = nn.Linear(self.nhid, 1)
self.ref_att = FCNet([self.nhid, self.nhid])
self.Wqt = nn.Linear(self.nhid, 1)
self.ref_att2 = FCNet([self.nhid, self.nhid])
self.Wqt2 = nn.Linear(self.nhid, 1)
self.ref_att3 = FCNet([self.nhid, self.nhid])
self.Wqt3 = nn.Linear(self.nhid, 1)
self.W1 = nn.Linear(self.nhid, self.nhid)
self.W2 = nn.Linear(self.nhid, self.nhid)
self.W3 = nn.Linear(self.nhid * 2, self.nhid)
self.W4 = nn.Linear(self.ninp, self.nhid)
self.W5 = nn.Linear(self.nhid * 2, self.nhid)
self.W6 = nn.Linear(self.nhid * 2, self.nhid)
self.W7 = nn.Linear(self.ninp, self.nhid)
self.W8 = nn.Linear(self.nhid * 2, self.nhid)
self.W9 = nn.Linear(self.nhid * 2, self.nhid)
self.W10 = nn.Linear(self.nhid, self.nhid)
self.W11 = nn.Linear(self.nhid, 1)
self.fc1 = nn.Linear(self.nhid * 4, self.ninp)
def forward(self, ques_feat, his_feat, rcnn_feat, ques_emb, rnd):
L = ques_emb.size(0)
# history attention ##############################
ques_emb_1 = self.Wq_1(ques_feat[-1]).view(-1, 1, self.nhid)
his_emb_1 = self.Wh_1(his_feat).view(-1, rnd, self.nhid)
atten_emb_1 = F.tanh(his_emb_1 + ques_emb_1.expand_as(his_emb_1))
his_atten_weight = F.softmax(
self.Wa_1(F.dropout(atten_emb_1, self.d, training=self.training).view(-1, self.nhid)).view(-1, rnd))
h_emb = torch.bmm(his_atten_weight.view(-1, 1, rnd), his_feat.view(-1, rnd, self.nhid))
        # graph construction ############################
graph = torch.cat((rcnn_feat, h_emb.expand_as(rcnn_feat)), dim=2)
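        # each region feature is paired with the attended history vector, so every graph node
        # carries 2 * nhid features (hence W3/W5/W6/W8/W9 take nhid * 2 inputs)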
# T == 1 #######################################
# question command #############################
q_norm = F.normalize(self.ref_att(ques_feat.transpose(0, 1)), p=2, dim=-1)
at = F.softmax(self.Wqt(F.dropout(q_norm, self.d, training=self.training).view(-1, self.nhid)).view(-1, L))
q_c = torch.bmm(at.view(-1, 1, L), ques_emb.transpose(0, 1)).squeeze(1)
# belief_matrix #############################
mes_b = self.W3(graph) * self.W4(q_c).unsqueeze(1)
belief_mat = torch.bmm(self.W5(graph), mes_b.transpose(1, 2))
# belief = F.softmax(belief_mat, dim=2)
# message passing ###########################
mes = self.W6(graph) * self.W7(q_c).unsqueeze(1)
sum_mes = self._create_neighbourhood(mes, belief_mat, self.neighbourhood_size)
context_1 = self.W8(torch.cat((h_emb.expand_as(rcnn_feat), sum_mes), dim=2))
graph2 = torch.cat((rcnn_feat, context_1), dim=2)
# T == 2 #######################################
# question command #############################
q_norm2 = F.normalize(self.ref_att2(ques_feat.transpose(0, 1)), p=2, dim=-1)
at2 = F.softmax(self.Wqt2(F.dropout(q_norm2, self.d, training=self.training).view(-1, self.nhid)).view(-1, L))
q_c2 = torch.bmm(at2.view(-1, 1, L), ques_emb.transpose(0, 1)).squeeze(1)
# belief_matrix #############################
mes_b2 = self.W3(graph2) * self.W4(q_c2).unsqueeze(1)
belief_mat2 = torch.bmm(self.W5(graph2), mes_b2.transpose(1, 2))
# belief2 = F.softmax(belief_mat2, dim=2)
# message passing ###########################
mes2 = self.W6(graph2) * self.W7(q_c2).unsqueeze(1)
sum_mes2 = self._create_neighbourhood(mes2, belief_mat2, self.neighbourhood_size)
context_2 = self.W8(torch.cat((context_1, sum_mes2), dim=2))
graph3 = torch.cat((rcnn_feat, context_2), dim=2)
# T == 3 #######################################
# question command #############################
q_norm3 = F.normalize(self.ref_att3(ques_feat.transpose(0, 1)), p=2, dim=-1)
at3 = F.softmax(self.Wqt3(F.dropout(q_norm3, self.d, training=self.training).view(-1, self.nhid)).view(-1, L))
q_c3 = torch.bmm(at3.view(-1, 1, L), ques_emb.transpose(0, 1)).squeeze(1)
# belief_matrix #############################
mes_b3 = self.W3(graph3) * self.W4(q_c3).unsqueeze(1)
belief_mat3 = torch.bmm(self.W5(graph3), mes_b3.transpose(1, 2))
# belief3 = F.softmax(belief_mat3, dim=2)
# message passing ###########################
mes3 = self.W6(graph3) * self.W7(q_c3).unsqueeze(1)
sum_mes3 = self._create_neighbourhood(mes3, belief_mat3, self.neighbourhood_size)
context_3 = self.W8(torch.cat((context_2, sum_mes3), dim=2))
graph4 = torch.cat((rcnn_feat, context_3), dim=2)
# Graph Attention ##############################
g2_emb = self.W9(graph4).view(-1, 36, self.nhid)
q_emb = self.W10(ques_feat[-1]).view(-1, 1, self.nhid)
att_gq_emb = F.tanh(g2_emb + q_emb.expand_as(g2_emb))
graph_att = F.softmax(
self.W11(F.dropout(att_gq_emb, self.d, training=self.training).view(-1, self.nhid)).view(-1,
36)).unsqueeze(1)
graph_emb = torch.bmm(graph_att, graph4)
# Multi-modal Fusion ############################
concat_feat = torch.cat(
(graph_emb.view(-1, 2 * self.nhid), ques_feat[-1].view(-1, self.nhid), h_emb.view(-1, self.nhid)), 1)
final_feat = F.tanh(self.fc1(F.dropout(concat_feat, self.d, training=self.training)))
return final_feat
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz,
self.nhid).zero_()), Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
def _create_neighbourhood_mes(self, message, top_ind):
"""## Inputs:
- message (batch_size, K, feat_dim)
- top_ind (batch_size, K, neighbourhood_size)
## Returns:
- neighbourhood_message (batch_size, K, neighbourhood_size, feat_dim)
"""
batch_size = message.size(0)
K = message.size(1)
feat_dim = message.size(2)
neighbourhood_size = top_ind.size(-1)
message = message.unsqueeze(1).expand(batch_size, K, K, feat_dim)
idx = top_ind.unsqueeze(-1).expand(batch_size, K, neighbourhood_size, feat_dim)
return torch.gather(message, dim=2, index=idx)
def _create_neighbourhood(self, message, belief_matrix, neighbourhood_size):
"""Creates a neighbourhood system for each graph node/image object.
## Inputs:
- message (batch_size, K, feat_dim): input message features
- adjacency_matrix (batch_size, K, K): learned adjacency matrix
- neighbourhood_size (int)
- weight (bool): specify if the features should be weighted by the adjacency matrix values
## Returns:
- sum_messages (batch_size, K, neighbourhood_size, feat_dim)
"""
# Number of graph nodes
K = message.size(1)
# pdb.set_trace()
# extract top k neighbours for each node and normalise
top_k, top_ind = torch.topk(belief_matrix, k=neighbourhood_size, dim=-1, sorted=False)
top_k = torch.stack([F.softmax(top_k[:, k])
for k in range(K)]).transpose(0, 1) # (batch_size, K, neighbourhood_size)
# extract top k features
neighbourhood_mes = self._create_neighbourhood_mes(message, top_ind)
sum_mes = torch.sum(top_k.unsqueeze(-1) * neighbourhood_mes, dim=2)
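        # each node aggregates the messages of its top-k neighbours, weighted by the
        # softmax-normalised belief scores selected above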
return sum_mes
class FCNet(nn.Module):
"""Simple class for non-linear fully connect network."""
def __init__(self, dims, dropout=0.2):
super(FCNet, self).__init__()
layers = []
for i in range(len(dims) - 2):
in_dim = dims[i]
out_dim = dims[i + 1]
if 0 < dropout:
layers.append(nn.Dropout(dropout))
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
layers.append(nn.Tanh())
if 0 < dropout:
layers.append(nn.Dropout(dropout))
layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
layers.append(nn.Sigmoid())
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.gather",
"torch.nn.Sequential",
"torch.nn.Sigmoid",
"torch.nn.functional.dropout",
"torch.nn.Tanh",
"torch.bmm",
"torch.nn.functional.softmax",
"torch.topk"
] | 1.7.1 | linxi1158/iMIX | 99898de97ef8b45462ca1d6bf2542e423a73d769 |
1.7 | import torch
from ..builder import VOCAB
from .baseprocessor import BaseProcessor
@VOCAB.register_module()
class VocabProcessor(BaseProcessor):
"""Use VocabProcessor when you have vocab file and you want to process
words to indices. Expects UNK token as "<unk>" and pads sentences using
"<pad>" token. Config parameters can have ``preprocessor`` property which
is used to preprocess the item passed and ``max_length`` property which
    points to maximum length of the sentence/tokens which can be converted to
indices. If the length is smaller, the sentence will be padded. Parameters
for "vocab" are necessary to be passed.
**Key**: vocab
Example Config::
task_attributes:
vqa:
vqa2:
processors:
text_processor:
type: vocab
params:
max_length: 14
vocab:
type: intersected
embedding_name: glove.6B.300d
vocab_file: vocabs/vocabulary_100k.txt
Args:
config (DictConfig): node containing configuration parameters of
the processor
Attributes:
vocab (Vocab): Vocab class object which is abstraction over the vocab
file passed.
"""
MAX_LENGTH_DEFAULT = 50
PAD_TOKEN = '<pad>'
PAD_INDEX = 0
def __init__(self,
vocab=dict(
type='IntersectedVocab',
vocab_file='textvqa/defaults/extras/vocabs/vocabulary_100k.txt',
embedding_name='glove.6B.300d'),
preprocessor=dict(type='SimpleSentenceProcessor'),
*args,
**kwargs):
# self.vocab = Vocab(*args, **config.vocab, **kwargs)
# self.vocab = build_vocab(vocab)
self.vocab = None
self.max_length = self.MAX_LENGTH_DEFAULT
# self.preprocessor = build_preprocessor(preprocessor)
self.preprocessor = None
# self._init_extras(config)
# def _init_extras(self, config, *args, **kwargs):
# self.writer = registry.get("writer")
# self.preprocessor = None
#
# if hasattr(config, "max_length"):
# self.max_length = config.max_length
# else:
# warnings.warn(
# "No 'max_length' parameter in Processor's "
# "configuration. Setting to {}.".format(self.MAX_LENGTH_DEFAULT)
# )
# self.max_length = self.MAX_LENGTH_DEFAULT
#
# if "preprocessor" in config:
# self.preprocessor = Processor(config.preprocessor, *args, **kwargs)
#
# if self.preprocessor is None:
# raise ValueError(
# f"No text processor named {config.preprocessor} is defined."
# )
def __call__(self, item):
"""Call requires item to have either "tokens" attribute or either
"text" attribute. If "text" is present, it will tokenized using the
preprocessor.
Args:
item (Dict): Dict containing the "text" or "tokens".
Returns:
Dict: Dict containing indices in "text" key, "tokens" in "tokens"
key and "length" of the string in "length" key.
"""
indices = None
if not isinstance(item, dict):
raise TypeError('Argument passed to the processor must be '
"a dict with either 'text' or 'tokens' as "
'keys')
if 'tokens' in item:
tokens = item['tokens']
indices = self._map_strings_to_indices(item['tokens'])
elif 'text' in item:
if self.preprocessor is None:
raise AssertionError('If tokens are not provided, a text ' 'processor must be defined in the config')
tokens = self.preprocessor({'text': item['text']})['text']
indices = self._map_strings_to_indices(tokens)
else:
raise AssertionError("A dict with either 'text' or 'tokens' keys " 'must be passed to the processor')
tokens, length = self._pad_tokens(tokens)
return {'text': indices, 'tokens': tokens, 'length': length}
def _pad_tokens(self, tokens):
padded_tokens = [self.PAD_TOKEN] * self.max_length
token_length = min(len(tokens), self.max_length)
padded_tokens[:token_length] = tokens[:token_length]
token_length = torch.tensor(token_length, dtype=torch.long)
return padded_tokens, token_length
def get_pad_index(self):
"""Get index of padding <pad> token in vocabulary.
Returns:
int: index of the padding token.
"""
return self.vocab.get_pad_index()
def get_vocab_size(self):
"""Get size of the vocabulary.
Returns:
int: size of the vocabulary.
"""
return self.vocab.get_size()
def _map_strings_to_indices(self, tokens):
length = min(len(tokens), self.max_length)
tokens = tokens[:length]
output = torch.zeros(self.max_length, dtype=torch.long)
output.fill_(self.vocab.get_pad_index())
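        # pre-fill every position with the <pad> index so that positions beyond the
        # (possibly truncated) token sequence remain padded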
for idx, token in enumerate(tokens):
output[idx] = self.vocab.stoi[token]
return output
| [
"torch.zeros",
"torch.tensor"
] | 1.7.1 | linxi1158/iMIX | 99898de97ef8b45462ca1d6bf2542e423a73d769 |
1.7 | import math
from bisect import bisect_right, bisect
from typing import List
from functools import lru_cache
import torch
from .builder import LR_SCHEDULERS
from torch.optim.lr_scheduler import LambdaLR, _LRScheduler
from .optimization import BertAdam
import imix.utils.distributed_info as comm
import logging
from transformers.optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
@LR_SCHEDULERS.register_module()
class WarmupMultiStepLR(_LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
milestones: List[int],
*,
gamma: float = 0.1,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = 'linear',
last_epoch: int = -1,
):
if not list(milestones) == sorted(milestones):
            raise ValueError('Milestones should be a list of increasing integers. Got {}'.format(milestones))
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(self.warmup_method, self.last_epoch, self.warmup_iters,
self.warmup_factor)
@lru_cache
def calculate_lr(base_lr):
return base_lr * warmup_factor * self.gamma**bisect_right(self.milestones, self.last_epoch)
return [calculate_lr(base_lr) for base_lr in self.base_lrs]
def _compute_values(self) -> List[float]:
return self.get_lr()
@LR_SCHEDULERS.register_module()
class ReduceOnPlateauSchedule(torch.optim.lr_scheduler.ReduceLROnPlateau):
def __init__(self, optimizer: torch.optim.Optimizer, **kwargs):
self.factor = kwargs['factor']
self.mode = kwargs['mode']
self.patience = kwargs['patience']
self.verbose = kwargs['verbose']
self.cooldown = kwargs['cooldown']
super().__init__(
optimizer,
mode=self.mode,
factor=self.factor,
patience=self.patience,
verbose=self.verbose,
cooldown=self.cooldown)
def get_lr(self):
return self.get_last_lr()
@LR_SCHEDULERS.register_module()
class WarmupCosineLR(_LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
max_iters: int,
*,
warmup_factor: float = 0.001,
warmup_iters: int = 1000,
warmup_method: str = 'linear',
last_epoch: int = -1,
):
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
warmup_factor = _get_warmup_factor_at_iter(self.warmup_method, self.last_epoch, self.warmup_iters,
self.warmup_factor)
@lru_cache
def calculate_lr(base_lr):
return base_lr * warmup_factor * 0.5 * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters))
return [calculate_lr(base_lr) for base_lr in self.base_lrs]
def _compute_values(self) -> List[float]:
return self.get_lr()
@LR_SCHEDULERS.register_module()
class PythiaScheduler(LambdaLR):
def __init__(self, optimizer, *args, **kwargs):
self._lambda_func = lr_lambda_update
super().__init__(optimizer, self.lr_lambda, *args, **kwargs)
def lr_lambda(self, step):
return self._lambda_func(step, self._global_config)
@LR_SCHEDULERS.register_module()
class MultiStepScheduler(PythiaScheduler):
def __init__(self, optimizer, *args, **kwargs):
self.use_warmup = kwargs['use_warmup']
self.lr_steps = kwargs['lr_steps']
self.lr_ratio = kwargs['lr_ratio']
self.warmup_iterations = kwargs['warmup_iterations'] if self.use_warmup else 0
self.warmup_factor = kwargs['warmup_factor']
assert self.warmup_iterations < self.lr_steps[0]
super().__init__(optimizer)
def get_lr(self):
if self.last_epoch <= self.warmup_iterations and self.use_warmup is True:
alpha = float(self.last_epoch) / float(self.warmup_iterations)
lr_ratio = self.warmup_factor * (1.0 - alpha) + alpha
return [base_lr * lr_ratio for base_lr in self.base_lrs]
else:
@lru_cache
def calculate_lr(base_lr):
return base_lr * self.lr_ratio**bisect_right(self.lr_steps, self.last_epoch)
return [calculate_lr(base_lr) for base_lr in self.base_lrs]
@LR_SCHEDULERS.register_module()
class WarmupLinearScheduleNonZero(_LRScheduler):
"""Linear warmup and then linear decay. Linearly increases learning rate
from 0 to max_lr over `warmup_steps` training steps.
    Linearly decreases the learning rate to min_lr over the remaining `t_total - warmup_steps` steps.
"""
def __init__(self, optimizer, t_total, warmup_iterations=0, use_warmup=False, min_lr=1e-5, last_epoch=-1):
self.use_warmup = use_warmup
self.warmup_iters = warmup_iterations
self.t_total = t_total
self.min_lr = min_lr
super(WarmupLinearScheduleNonZero, self).__init__(optimizer, last_epoch=last_epoch)
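    # Worked example (values are illustrative): with warmup_iterations=100, t_total=1000 and
    # min_lr=1e-5, the factor below rises linearly from 0 to 1 over the first 100 steps, then
    # decays linearly towards 0 over the remaining 900 steps, with the resulting lr clamped at min_lr.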
def get_lr(self):
step = self.last_epoch
if step < self.warmup_iters:
lr_factor = float(step) / float(max(1, self.warmup_iters))
else:
lr_factor = max(0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_iters)))
return [
base_lr * lr_factor if (base_lr * lr_factor) > self.min_lr else self.min_lr for base_lr in self.base_lrs
]
def _get_warmup_factor_at_iter(method: str, iter: int, warmup_iters: int, warmup_factor: float) -> float:
"""Return the learning rate warmup factor at a specific iteration. See
:paper:`in1k1h` for more details.
Args:
method (str): warmup method; either "constant" or "linear".
iter (int): iteration at which to calculate the warmup factor.
warmup_iters (int): the number of warmup iterations.
warmup_factor (float): the base warmup factor (the meaning changes according
to the method used).
Returns:
float: the effective warmup factor at the given iteration.
"""
if iter >= warmup_iters:
return 1.0
support_method = ['constant', 'linear']
def constant_method():
return warmup_factor
def linear_method():
alpha = iter / warmup_iters
return warmup_factor * (1 - alpha) + alpha
if method in support_method:
return eval(method + '_method')()
else:
raise ValueError('Unknown warmup method: {}'.format(method))
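# Worked example for _get_warmup_factor_at_iter (linear method): with warmup_factor=0.001 and
# warmup_iters=1000 the factor is 0.001 at iter 0, roughly 0.5 at iter 500, and 1.0 from iter 1000 on.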
def lr_lambda_update(i_iter, cfg):
if cfg.training.use_warmup is True and i_iter <= cfg.training.warmup_iterations:
alpha = float(i_iter) / float(cfg.training.warmup_iterations)
return cfg.training.warmup_factor * (1.0 - alpha) + alpha
else:
idx = bisect(cfg.training.lr_steps, i_iter)
return pow(cfg.training.lr_ratio, idx)
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x / warmup
return 0.5 * (1.0 + torch.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
"""Linearly increases learning rate over `warmup`*`t_total` (as provided to
BertAdam) training steps.
Learning rate is 1. afterwards.
"""
if x < warmup:
return x / warmup
return 1.0
def warmup_linear(x, warmup=0.002):
"""Specifies a triangular learning rate schedule where peak is reached at
`warmup`*`t_total`-th (as provided to BertAdam) training step.
After `t_total`-th training step, learning rate is zero.
"""
if x < warmup:
return x / warmup
return max((x - 1.) / (warmup - 1.), 0)
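# Worked example: with warmup=0.1, warmup_linear(0.05) == 0.5 (rising), warmup_linear(0.1) == 1.0
# (peak), warmup_linear(0.55) == 0.5 (decaying) and warmup_linear(1.0) == 0.0.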
SCHEDULES = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
@LR_SCHEDULERS.register_module()
class BertWarmupLinearLR(torch.optim.lr_scheduler._LRScheduler):
"""Implements BERT version of Warmup Linear lr algorithm
Params:
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
"""
def __init__(
self,
optimizer: BertAdam,
max_iters: int,
warmup: float = -1,
warmup_method: str = 'warmup_linear',
last_epoch: int = -1,
):
if warmup_method not in SCHEDULES:
raise ValueError('Invalid schedule parameter: {}'.format(warmup_method))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError('Invalid warmup: {} - should be in [0.0, 1.0) or -1'.format(warmup))
self.max_iters = max_iters
self.warmup = warmup
self.warmup_method = warmup_method
self.warned_for_t_total = False
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]:
if self.max_iters != -1:
if comm.is_main_process():
logger = logging.getLogger(__name__)
schedule_fct = SCHEDULES[self.warmup_method]
progress = self.last_epoch / self.max_iters
lr_cur = [base_lr * schedule_fct(progress, self.warmup) for base_lr in self.base_lrs]
            # warning for exceeding t_total (only active with warmup_linear)
if self.warmup_method == 'warmup_linear' and progress > 1. and not self.warned_for_t_total:
if comm.is_main_process():
logger.info(
"Training beyond specified 't_total' steps with schedule '{}'. Learning rate set to {}. "
"Please set 't_total' of {} correctly.".format(self.warmup_method, lr_cur,
self.__class__.__name__))
self.warned_for_t_total = True
# end warning
else:
lr_cur = [base_lr for base_lr in self.base_lrs]
# Different definitions of half-cosine with warmup are possible. For
# simplicity we multiply the standard half-cosine schedule by the warmup
# factor. An alternative is to start the period of the cosine at warmup_iters
# instead of at 0. In the case that warmup_iters << max_iters the two are
# very close to each other.
return lr_cur
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
@LR_SCHEDULERS.register_module()
class ConstantSchedule(LambdaLR):
def __new__(cls, optimizer, *args, **kwargs):
return get_constant_schedule(optimizer, *args, **kwargs)
@LR_SCHEDULERS.register_module()
class WarmupConstantSchedule(LambdaLR):
def __new__(cls, optimizer, *args, **kwargs):
return get_constant_schedule_with_warmup(optimizer, *args, **kwargs)
@LR_SCHEDULERS.register_module()
class WarmupLinearSchedule(LambdaLR):
"""Linear warmup and then linear decay. Linearly increases learning rate
from 0 to 1 over `warmup_steps` training steps.
Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps.
"""
def __new__(cls, optimizer, *args, **kwargs):
return get_linear_schedule_with_warmup(optimizer, *args, **kwargs)
@LR_SCHEDULERS.register_module()
class WarmupCosineSchedule(LambdaLR):
def __new__(cls, optimizer, *args, **kwargs):
return get_cosine_schedule_with_warmup(optimizer, *args, **kwargs)
@LR_SCHEDULERS.register_module()
class WarmupCosineWithHardRestartsSchedule(LambdaLR):
def __new__(cls, optimizer, *args, **kwargs):
return get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, *args, **kwargs)
@LR_SCHEDULERS.register_module()
class WarmupPolynomialSchedule(LambdaLR):
def __new__(cls, optimizer, *args, **kwargs):
return get_polynomial_decay_schedule_with_warmup(optimizer, *args, **kwargs)
| [
"torch.cos"
] | 1.7.1 | linxi1158/iMIX | af87a17275f02c94932bb2e29f132a84db812002 |
1.1 | import torch.nn as nn
from ..registry import HEADS
from ..utils import ConvModule
from mmdetection.core import auto_fp16
@HEADS.register_module
class MGANHead(nn.Module):
def __init__(self,
num_convs=2,
roi_feat_size=7,
in_channels=512,
conv_out_channels=512,
conv_cfg=None,
norm_cfg=None):
super(MGANHead, self).__init__()
self.num_convs = num_convs
self.roi_feat_size = roi_feat_size
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
logits_in_channel = self.conv_out_channels
self.conv_logits = nn.Conv2d(logits_in_channel, 1, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
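        # predict a single-channel attention map, squash it with a sigmoid and use it to
        # re-weight the input features (mask-guided attention)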
x = self.conv_logits(x).sigmoid() * x
return x
| [
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.ModuleList"
] | 1.1 | zjplab/Pedestron | 07e1a2cee82b57e1584b0c744f5b44f1ae92be73 |
1.1 | import torch.nn as nn
import torch.nn.functional as F
from mmdetection.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from .utils import weight_reduce_loss
from ..registry import LOSSES
# This method is only for debugging
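# Focal loss (Lin et al.): FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t), where p_t is the
# predicted probability of the ground-truth class; gamma down-weights easy examples and alpha
# balances positive/negative samples.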
def py_sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
def sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
# Function.apply does not accept keyword arguments, so the decorator
# "weighted_loss" is not applicable
loss = _sigmoid_focal_loss(pred, target, gamma, alpha)
# TODO: find a proper way to handle the shape of weight
if weight is not None:
weight = weight.view(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module
class FocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=1.0):
super(FocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
self.use_sigmoid = use_sigmoid
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
loss_cls = self.loss_weight * sigmoid_focal_loss(
pred,
target,
weight,
gamma=self.gamma,
alpha=self.alpha,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
| [
"torch.nn.functional.binary_cross_entropy_with_logits"
] | 1.1 | zjplab/Pedestron | 07e1a2cee82b57e1584b0c744f5b44f1ae92be73 |
1.4 | # -*- coding: utf-8 -*
import numpy as np
from loguru import logger
import torch
import torch.nn as nn
import torch.nn.functional as F
from videoanalyst.model.common_opr.common_block import (conv_bn_relu,
xcorr_depthwise)
from videoanalyst.model.module_base import ModuleBase
from videoanalyst.model.task_model.taskmodel_base import (TRACK_TASKMODELS,
VOS_TASKMODELS)
torch.set_printoptions(precision=8)
@TRACK_TASKMODELS.register
@VOS_TASKMODELS.register
class SiamTrack(ModuleBase):
r"""
SiamTrack model for tracking
Hyper-Parameters
----------------
pretrain_model_path: string
path to parameter to be loaded into module
head_width: int
feature width in head structure
"""
default_hyper_params = dict(pretrain_model_path="",
head_width=256,
conv_weight_std=0.01,
neck_conv_bias=[True, True, True, True],
corr_fea_output=False,
trt_mode=False,
trt_fea_model_path="",
trt_track_model_path="")
support_phases = ["train", "feature", "track", "freeze_track_fea"]
def __init__(self, backbone, head, loss=None):
super(SiamTrack, self).__init__()
self.basemodel = backbone
self.head = head
self.loss = loss
self.trt_fea_model = None
self.trt_track_model = None
self._phase = "train"
@property
def phase(self):
return self._phase
@phase.setter
def phase(self, p):
assert p in self.support_phases
self._phase = p
def forward(self, *args, phase=None):
r"""
Perform tracking process for different phases (e.g. train / init / track)
Arguments
---------
target_img: torch.Tensor
target template image patch
search_img: torch.Tensor
search region image patch
Returns
-------
fcos_score_final: torch.Tensor
predicted score for bboxes, shape=(B, HW, 1)
fcos_bbox_final: torch.Tensor
predicted bbox in the crop, shape=(B, HW, 4)
fcos_cls_prob_final: torch.Tensor
classification score, shape=(B, HW, 1)
fcos_ctr_prob_final: torch.Tensor
center-ness score, shape=(B, HW, 1)
"""
if phase is None:
phase = self._phase
# used during training
if phase == 'train':
# resolve training data
training_data = args[0]
target_img = training_data["im_z"]
search_img = training_data["im_x"]
# backbone feature
f_z = self.basemodel(target_img)
f_x = self.basemodel(search_img)
# feature adjustment
c_z_k = self.c_z_k(f_z)
r_z_k = self.r_z_k(f_z)
c_x = self.c_x(f_x)
r_x = self.r_x(f_x)
# feature matching
r_out = xcorr_depthwise(r_x, r_z_k)
c_out = xcorr_depthwise(c_x, c_z_k)
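            # depthwise cross-correlation: the template features act as per-channel kernels that
            # are correlated with the search-region features (separate cls and reg branches)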
# head
fcos_cls_score_final, fcos_ctr_score_final, fcos_bbox_final, corr_fea = self.head(
c_out, r_out)
predict_data = dict(
cls_pred=fcos_cls_score_final,
ctr_pred=fcos_ctr_score_final,
box_pred=fcos_bbox_final,
)
if self._hyper_params["corr_fea_output"]:
predict_data["corr_fea"] = corr_fea
return predict_data
# used for template feature extraction (normal mode)
elif phase == 'feature':
target_img, = args
if self._hyper_params["trt_mode"]:
# extract feature with trt model
out_list = self.trt_fea_model(target_img)
else:
# backbone feature
f_z = self.basemodel(target_img)
# template as kernel
c_z_k = self.c_z_k(f_z)
r_z_k = self.r_z_k(f_z)
# output
out_list = [c_z_k, r_z_k]
# used for template feature extraction (trt mode)
elif phase == "freeze_track_fea":
search_img, = args
# backbone feature
f_x = self.basemodel(search_img)
# feature adjustment
c_x = self.c_x(f_x)
r_x = self.r_x(f_x)
# head
return [c_x, r_x]
# [Broken] used for template feature extraction (trt mode)
# currently broken due to following issue of "torch2trt" package
# c.f. https://github.com/NVIDIA-AI-IOT/torch2trt/issues/251
elif phase == "freeze_track_head":
c_out, r_out = args
# head
outputs = self.head(c_out, r_out, 0, True)
return outputs
# used for tracking one frame during test
elif phase == 'track':
if len(args) == 3:
search_img, c_z_k, r_z_k = args
if self._hyper_params["trt_mode"]:
c_x, r_x = self.trt_track_model(search_img)
else:
# backbone feature
f_x = self.basemodel(search_img)
# feature adjustment
c_x = self.c_x(f_x)
r_x = self.r_x(f_x)
elif len(args) == 4:
# c_x, r_x already computed
c_z_k, r_z_k, c_x, r_x = args
else:
raise ValueError("Illegal args length: %d" % len(args))
# feature matching
r_out = xcorr_depthwise(r_x, r_z_k)
c_out = xcorr_depthwise(c_x, c_z_k)
# head
fcos_cls_score_final, fcos_ctr_score_final, fcos_bbox_final, corr_fea = self.head(
c_out, r_out, search_img.size(-1))
# apply sigmoid
fcos_cls_prob_final = torch.sigmoid(fcos_cls_score_final)
fcos_ctr_prob_final = torch.sigmoid(fcos_ctr_score_final)
# apply centerness correction
fcos_score_final = fcos_cls_prob_final * fcos_ctr_prob_final
# register extra output
extra = dict(c_x=c_x, r_x=r_x, corr_fea=corr_fea)
# output
out_list = fcos_score_final, fcos_bbox_final, fcos_cls_prob_final, fcos_ctr_prob_final, extra
else:
            raise ValueError("Phase not implemented.")
return out_list
def update_params(self):
r"""
Load model parameters
"""
self._make_convs()
self._initialize_conv()
super().update_params()
if self._hyper_params["trt_mode"]:
logger.info("trt mode enable")
from torch2trt import TRTModule
self.trt_fea_model = TRTModule()
self.trt_fea_model.load_state_dict(
torch.load(self._hyper_params["trt_fea_model_path"]))
self.trt_track_model = TRTModule()
self.trt_track_model.load_state_dict(
torch.load(self._hyper_params["trt_track_model_path"]))
            logger.info("loading trt model successfully")
def _make_convs(self):
head_width = self._hyper_params['head_width']
# feature adjustment
self.r_z_k = conv_bn_relu(head_width,
head_width,
1,
3,
0,
has_relu=False)
self.c_z_k = conv_bn_relu(head_width,
head_width,
1,
3,
0,
has_relu=False)
self.r_x = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)
self.c_x = conv_bn_relu(head_width, head_width, 1, 3, 0, has_relu=False)
def _initialize_conv(self, ):
conv_weight_std = self._hyper_params['conv_weight_std']
conv_list = [
self.r_z_k.conv, self.c_z_k.conv, self.r_x.conv, self.c_x.conv
]
for ith in range(len(conv_list)):
conv = conv_list[ith]
torch.nn.init.normal_(conv.weight,
std=conv_weight_std) # conv_weight_std=0.01
def set_device(self, dev):
if not isinstance(dev, torch.device):
dev = torch.device(dev)
self.to(dev)
if self.loss is not None:
for loss_name in self.loss:
self.loss[loss_name].to(dev)
| [
"torch.device",
"torch.sigmoid",
"torch.set_printoptions",
"torch.nn.init.normal_",
"torch.load"
] | 1.4.0 | 983632847/video_analyst | 01b7ad278b828a3f7ff7a0488c5ca8f055240192 |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Union
import numpy as np
import torch
from monai.metrics.utils import *
from monai.utils import MetricReduction
class SurfaceDistanceMetric:
"""
Compute Surface Distance between two tensors. It can support both multi-classes and multi-labels tasks.
It supports both symmetric and asymmetric surface distance calculation.
Input `y_pred` (BNHW[D] where N is number of classes) is compared with ground truth `y` (BNHW[D]).
`y_preds` is expected to have binarized predictions and `y` should be in one-hot format.
You can use suitable transforms in ``monai.transforms.post`` first to achieve binarized values.
Args:
include_background: whether to skip distance computation on the first channel of
the predicted output. Defaults to ``False``.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
        distance_metric: [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
reduction: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}
Define the mode to reduce computation result of 1 batch data. Defaults to ``"mean"``.
"""
def __init__(
self,
include_background: bool = False,
symmetric: bool = False,
distance_metric: str = "euclidean",
reduction: Union[MetricReduction, str] = MetricReduction.MEAN,
) -> None:
super().__init__()
self.include_background = include_background
self.distance_metric = distance_metric
self.symmetric = symmetric
self.reduction = reduction
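    # Usage sketch (tensor names are illustrative):
    #   metric = SurfaceDistanceMetric(include_background=False, symmetric=True)
    #   asd, not_nans = metric(y_pred_onehot, y_onehot)  # reduced according to `reduction`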
def __call__(self, y_pred: torch.Tensor, y: torch.Tensor):
"""
Args:
y_pred: input data to compute, typical segmentation model output.
It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values
should be binarized.
y: ground truth to compute the distance. It must be one-hot format and first dim is batch.
The values should be binarized.
Raises:
ValueError: when `y` is not a binarized tensor.
ValueError: when `y_pred` has less than three dimensions.
"""
if not torch.all(y_pred.byte() == y_pred):
warnings.warn("y_pred is not a binarized tensor here!")
if not torch.all(y.byte() == y):
raise ValueError("y should be a binarized tensor.")
dims = y_pred.ndimension()
if dims < 3:
raise ValueError("y_pred should have at least three dimensions.")
# compute (BxC) for each channel for each batch
f = compute_average_surface_distance(
y_pred=y_pred,
y=y,
include_background=self.include_background,
symmetric=self.symmetric,
distance_metric=self.distance_metric,
)
# do metric reduction
f, not_nans = do_metric_reduction(f, self.reduction)
return f, not_nans
def compute_average_surface_distance(
y_pred: Union[np.ndarray, torch.Tensor],
y: Union[np.ndarray, torch.Tensor],
include_background: bool = False,
symmetric: bool = False,
distance_metric: str = "euclidean",
):
"""
This function is used to compute the Average Surface Distance from `y_pred` to `y`
under the default setting.
    In addition, if ``symmetric = True`` is set, the average symmetric surface distance between
these two inputs will be returned.
Args:
y_pred: input data to compute, typical segmentation model output.
It must be one-hot format and first dim is batch, example shape: [16, 3, 32, 32]. The values
should be binarized.
y: ground truth to compute mean the distance. It must be one-hot format and first dim is batch.
The values should be binarized.
include_background: whether to skip distance computation on the first channel of
the predicted output. Defaults to ``False``.
symmetric: whether to calculate the symmetric average surface distance between
`seg_pred` and `seg_gt`. Defaults to ``False``.
        distance_metric: [``"euclidean"``, ``"chessboard"``, ``"taxicab"``]
the metric used to compute surface distance. Defaults to ``"euclidean"``.
"""
if not include_background:
y_pred, y = ignore_background(
y_pred=y_pred,
y=y,
)
y = y.float()
y_pred = y_pred.float()
if y.shape != y_pred.shape:
raise ValueError("y_pred and y should have same shapes.")
batch_size, n_class = y_pred.shape[:2]
asd = np.empty((batch_size, n_class))
for b, c in np.ndindex(batch_size, n_class):
(edges_pred, edges_gt) = get_mask_edges(y_pred[b, c], y[b, c])
surface_distance = get_surface_distance(edges_pred, edges_gt, distance_metric=distance_metric)
if surface_distance.shape == (0,):
avg_surface_distance = np.nan
else:
avg_surface_distance = surface_distance.mean()
if not symmetric:
asd[b, c] = avg_surface_distance
else:
surface_distance_2 = get_surface_distance(edges_gt, edges_pred, distance_metric=distance_metric)
if surface_distance_2.shape == (0,):
avg_surface_distance_2 = np.nan
else:
avg_surface_distance_2 = surface_distance_2.mean()
asd[b, c] = np.mean((avg_surface_distance, avg_surface_distance_2))
return torch.from_numpy(asd)
| [
"torch.from_numpy"
] | 1.5 | danielschulz/MONAI | 54ef6e9e700f0de3d50184c0148f953be871a58e |
1.7 | """
Package defining various dynamic forward models as well as convenience methods to generate the
right hand sides (RHS) of the related partial differential equations.
Currently, the following forward models are implemented:
#. An advection equation for images
#. An advection equation for maps
#. The EPDiff-equation parameterized using the vector-valued momentum for images
#. The EPDiff-equation parameterized using the vector-valued momentum for maps
#. The EPDiff-equation parameterized using the scalar-valued momentum for images
#. The EPDiff-equation parameterized using the scalar-valued momentum for maps
The images are expected to be tensors of dimension: BxCxXxYxZ (or BxCxX in 1D and BxCxXxY in 2D),
where B is the batch-size, C the number of channels, and X, Y, and Z are the spatial coordinate indices.
Furthermore, the following right hand sides (RHSs) are provided
#. Image advection
#. Map advection
#. Scalar conservation law
#. EPDiff
"""
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
from abc import ABCMeta, abstractmethod
import numpy as np
from . import finite_differences_multi_channel as fdm
from . import utils
from .data_wrapper import MyTensor
from future.utils import with_metaclass
import torch.nn as nn
import torch
class RHSLibrary(object):
"""
Convenience class to quickly generate various right hand sides (RHSs) of popular partial differential
equations. In this way new forward models can be written with minimal code duplication.
"""
def __init__(self, spacing, use_neumann_BC_for_map=False):
"""
Constructor
:param spacing: Spacing for the images. This will be an array with 1, 2, or 3 entries in 1D, 2D, and 3D respectively.
"""
self.spacing = spacing
"""spatial spacing"""
self.spacing_min = np.min(spacing)
""" min of the spacing"""
self.spacing_ratio = spacing/self.spacing_min
self.fdt_ne = fdm.FD_torch_multi_channel(spacing,mode='neumann_zero')
"""torch finite differencing support neumann zero"""
self.fdt_le = fdm.FD_torch_multi_channel( spacing, mode='linear')
"""torch finite differencing support linear extrapolation"""
self.fdt_di = fdm.FD_torch_multi_channel(spacing, mode='dirichlet_zero')
"""torch finite differencing support dirichlet zero"""
self.dim = len(self.spacing)
"""spatial dimension"""
self.use_neumann_BC_for_map = use_neumann_BC_for_map
"""If True uses zero Neumann boundary conditions also for evolutions of the map, if False uses linear extrapolation"""
def rhs_advect_image_multiNC(self,I,v):
'''
        Advects a batch of images which can be multi-channel. Expected image format here is
BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels
per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-\\nabla I^Tv`
:param I: Image batch BxCIxXxYxZ
:param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ
:return: Returns the RHS of the advection equations involved BxCxXxYxZ
'''
rhs_ret= self._rhs_advect_image_multiN(I, v )
return rhs_ret
def _rhs_advect_image_multiN(self,I,v):
"""
:param I: One-channel input image: Bx1xXxYxZ
:param v: velocity field BxCxXxYxZ
:return: Returns the RHS of the advection equation for one channel BxXxYxZ
"""
if self.dim == 1:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1]
elif self.dim == 2:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]
elif self.dim == 3:
rhs_ret = -self.fdt_ne.dXc(I) * v[:,0:1] -self.fdt_ne.dYc(I)*v[:,1:2]-self.fdt_ne.dZc(I)*v[:,2:3]
else:
raise ValueError('Only supported up to dimension 3')
return rhs_ret
def rhs_scalar_conservation_multiNC(self, I, v):
"""
        Scalar conservation law for a batch of images which can be multi-channel. Expected image format here is
BxCxXxYxZ, where B is the number of images (batch size), C, the number of channels
per image and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-div(Iv)`
:param I: Image batch BxCIxXxYxZ
:param v: Velocity fields (this will be one velocity field per image) BxCxXxYxZ
:return: Returns the RHS of the scalar conservation law equations involved BxCxXxYxZ
"""
rhs_ret=self._rhs_scalar_conservation_multiN(I, v)
return rhs_ret
def _rhs_scalar_conservation_multiN(self, I, v):
"""
:param I: One-channel input image: Bx1xXxYxZ
:param v: velocity field BxCxXxYxZ
:return: Returns the RHS of the scalar-conservation law equation for one channel BxXxYxZ
"""
if self.dim==1:
rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1])
elif self.dim==2:
rhs_ret = -self.fdt_ne.dXc(I*v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])
elif self.dim==3:
rhs_ret = -self.fdt_ne.dXc(I* v[:,0:1]) -self.fdt_ne.dYc(I*v[:,1:2])-self.fdt_ne.dZc(I*v[:,2:3])
else:
raise ValueError('Only supported up to dimension 3')
return rhs_ret
def rhs_lagrangian_evolve_map_multiNC(self, phi, v):
"""
        Evolves a set of N maps (for N images). Expected format here is
BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels
per (here the spatial dimension for the map coordinate functions),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D).
This is used to evolve the map going from source to target image. Requires interpolation
        so it should, if at all possible, not be used as part of an optimization.
        The reason for computing the inverse map this way is that the map is defined in the source
        space and describes where each point moves to (whereas the target-space map describes where
        each point comes from). In this situation we only need to sample the velocity at the mapped
        location and accumulate it over the time steps; since the advection function moves the image
        (or the map) by one step of v, the same velocity field is shared across coordinates, so it is
        safe to compute it in this way.
:math:`v\circ\phi`
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return: Returns the RHS of the evolution equations involved BxCxXxYxZ
:param phi:
:param v:
:return:
"""
rhs_ret = utils.compute_warped_image_multiNC(v, phi, spacing=self.spacing, spline_order=1,zero_boundary=False)
return rhs_ret
def rhs_advect_map_multiNC(self, phi, v):
'''
        Advects a set of N maps (for N images). Expected format here is
BxCxXxYxZ, where B is the number of images/maps (batch size), C, the number of channels
per (here the spatial dimension for the map coordinate functions),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
:math:`-D\\phi v`
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return: Returns the RHS of the advection equations involved BxCxXxYxZ
'''
sz = phi.size()
rhs_ret = self._rhs_advect_map_call(phi, v)
return rhs_ret
def _rhs_advect_map_call(self,phi,v):
"""
:param phi: map batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per map) BxCxXxYxZ
:return rhsphi: Returns the RHS of the advection equations involved BxCxXxYxZ
"""
fdc = self.fdt_le # use order boundary conditions (interpolation)
if self.dim==1:
dxc_phi = -fdc.dXc(phi)
rhsphi = v[:, 0:1] * dxc_phi
elif self.dim==2:
dxc_phi = -fdc.dXc(phi)
dyc_phi = -fdc.dYc(phi)
rhsphi = v[:, 0:1] * dxc_phi + v[:, 1:2] * dyc_phi
elif self.dim==3:
dxc_phi = -fdc.dXc(phi)
dyc_phi = -fdc.dYc(phi)
dzc_phi = -fdc.dZc(phi)
rhsphi = v[:,0:1]*dxc_phi + v[:,1:2]*dyc_phi + v[:,2:3]*dzc_phi
else:
raise ValueError('Only supported up to dimension 3')
return rhsphi
def rhs_epdiff_multiNC(self, m, v):
'''
        Computes the right hand side of the EPDiff equation for N momenta (for N images).
        Expected format here is BxCxXxYxZ, where B is the number of momenta (batch size), C,
the number of channels per (here the spatial dimension for the momenta),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
a new version, where batch is no longer calculated separately
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:param m: momenta batch BxCXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCXxYxZ
:return: Returns the RHS of the EPDiff equations involved BxCXxYxZ
'''
sz = m.size()
rhs_ret = MyTensor(sz).zero_()
rhs_ret = self._rhs_epdiff_call(m, v, rhs_ret)
return rhs_ret
def _rhs_epdiff_call(self, m, v,rhsm):
"""
:param m: momenta batch BxCxXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ
:return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ
"""
# if self.use_neumann_BC_for_map:
# fdc = self.fdt_ne # use zero Neumann boundary conditions
# else:
# fdc = self.fdt_le # do linear extrapolation
fdc = self.fdt_ne
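        # central-difference discretisation of -(div(m_1 v), ..., div(m_d v))^T - (Dv)^T m:
        # the dXc/dYc/dZc(m * v) terms form the divergence part, the d*c_v * m sums form (Dv)^T m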
#fdc = self.fdt_le
if self.dim == 1:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dxc_v = -fdc.dXc(v)
dxc_v_multi_m = dxc_v * m
rhsm[:]= dxc_mv0 + dxc_v_multi_m
elif self.dim == 2:
# (m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm (EPDiff equation)
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dyc_mv1 = -fdc.dYc(m*v[:,1:2])
dc_mv_sum = dxc_mv0 + dyc_mv1
dxc_v = -fdc.dXc(v)
dyc_v = -fdc.dYc(v)
dxc_v_multi_m = dxc_v * m
dyc_v_multi_m = dyc_v * m
dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m, 1)
dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m, 1)
rhsm[:,0, :, :] = dc_mv_sum[:,0] + dxc_v_multi_m_sum
rhsm[:,1, :, :] = dc_mv_sum[:,1] + dyc_v_multi_m_sum
elif self.dim == 3:
dxc_mv0 = -fdc.dXc(m*v[:,0:1])
dyc_mv1 = -fdc.dYc(m*v[:,1:2])
dzc_mv2 = -fdc.dZc(m*v[:,2:3])
dc_mv_sum = dxc_mv0 + dyc_mv1 + dzc_mv2
dxc_v = -fdc.dXc(v)
dyc_v = -fdc.dYc(v)
dzc_v = -fdc.dZc(v)
dxc_v_multi_m = dxc_v*m
dyc_v_multi_m = dyc_v*m
dzc_v_multi_m = dzc_v*m
dxc_v_multi_m_sum = torch.sum(dxc_v_multi_m,1)
dyc_v_multi_m_sum = torch.sum(dyc_v_multi_m,1)
dzc_v_multi_m_sum = torch.sum(dzc_v_multi_m,1)
rhsm[:, 0] = dc_mv_sum[:,0] + dxc_v_multi_m_sum
rhsm[:, 1] = dc_mv_sum[:,1] + dyc_v_multi_m_sum
rhsm[:, 2] = dc_mv_sum[:,2] + dzc_v_multi_m_sum
else:
raise ValueError('Only supported up to dimension ')
return rhsm
def rhs_adapt_epdiff_wkw_multiNC(self, m, v,w, sm_wm,smoother):
'''
        Computes the right hand side of the EPDiff equation for N momenta (for N images).
        Expected format here is BxCxXxYxZ, where B is the number of momenta (batch size), C,
the number of channels per (here the spatial dimension for the momenta),
and X, Y, Z are the spatial coordinates (X only in 1D; X,Y only in 2D)
a new version, where batch is no longer calculated separately
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:param m: momenta batch BxCXxYxZ
:param v: Velocity fields (this will be one velocity field per momentum) BxCXxYxZ
:return: Returns the RHS of the EPDiff equations involved BxCXxYxZ
'''
sz = m.size()
rhs_ret = MyTensor(sz).zero_()
rhs_ret = self._rhs_adapt_epdiff_wkw_call(m, v,w,sm_wm,smoother, rhs_ret)
return rhs_ret
def _rhs_adapt_epdiff_wkw_call(self, m, v,w,sm_wm, smoother, rhsm):
"""
:param m: momenta batch BxCxXxYxZ
:param sm_wm: smoothed(wm) batch x K x dim x X x Y x ...
:param w: smoothed(wm) batch x K x X x Y x ...
:param v: Velocity fields (this will be one velocity field per momentum) BxCxXxYxZ
:return rhsm: Returns the RHS of the EPDiff equations involved BxCxXxYxZ
"""
# if self.use_neumann_BC_for_map:
# fdc = self.fdt_ne # use zero Neumann boundary conditions
# else:
# fdc = self.fdt_le # do linear extrapolation
fdc = self.fdt_ne
rhs = self._rhs_epdiff_call(m,v,rhsm)
ret_var = torch.empty_like(rhs)
# ret_var, rhs should batch x dim x X x Yx ..
dim = m.shape[1]
sz = [m.shape[0]]+[1]+list(m.shape[1:]) # batchx1xdimx X x Y
m = m.view(*sz)
m_sm_wm = m* sm_wm
m_sm_wm = m_sm_wm.sum(dim=2)
sm_m_sm_wm = smoother.smooth(m_sm_wm) # batchx K x X xY...
dxc_w = fdc.dXc(w)
dc_w_list = [dxc_w]
if dim == 2 or dim == 3:
dyc_w = fdc.dYc(w)
dc_w_list.append(dyc_w)
if dim == 3:
dzc_w = fdc.dZc(w) # batch x K x X xY ...
dc_w_list.append(dzc_w)
for i in range(dim):
ret_var[:, i] = rhs[:, i] + (sm_m_sm_wm* dc_w_list[i]).sum(1)
return ret_var
class ForwardModel(with_metaclass(ABCMeta, object)):
"""
Abstract forward model class. Should never be instantiated.
Derived classes require the definition of f(self,t,x,u,pars) and u(self,t,pars).
These functions will be used for integration: x'(t) = f(t,x(t),u(t))
"""
def __init__(self, sz, spacing, params=None):
'''
Constructor of abstract forward model class
:param sz: size of images
:param spacing: numpy array for spacing in x,y,z directions
'''
self.dim = spacing.size # spatial dimension of the problem
"""spatial dimension"""
self.spacing = spacing
"""spatial spacing"""
self.sz = sz
"""image size (BxCxXxYxZ)"""
self.params = params
"""ParameterDict instance holding parameters"""
self.rhs = RHSLibrary(self.spacing)
"""rhs library support"""
if self.dim>3 or self.dim<1:
raise ValueError('Forward models are currently only supported in dimensions 1 to 3')
self.debug_mode_on =False
@abstractmethod
def f(self,t,x,u,pars,variables_from_optimizer=None):
"""
Function to be integrated
:param t: time
:param x: state
:param u: input
:param pars: optional parameters
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: the function value, should return a list (to support easy concatenations of states)
"""
pass
def u(self,t,pars,variables_from_optimizer=None):
"""
External input
:param t: time
:param pars: parameters
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: the external input
"""
return []
class AdvectMap(ForwardModel):
"""
Forward model to advect an n-D map using a transport equation: :math:`\\Phi_t + D\\Phi v = 0`.
v is treated as an external argument and \Phi is the state
"""
def __init__(self, sz, spacing, params=None,compute_inverse_map=False):
super(AdvectMap,self).__init__(sz,spacing,params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
def u(self,t, pars, variables_from_optimizer=None):
"""
External input, to hold the velocity field
:param t: time (ignored; not time-dependent)
:param pars: assumes an n-D velocity field is passed as the only input argument
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: Simply returns this velocity field
"""
return pars['v']
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of transport equation:
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the map, \Phi, itself (assumes 3D-5D array; [nrI,0,:,:] x-coors; [nrI,1,:,:] y-coors; ...
:param u: external input, will be the velocity field here
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [phi]
"""
if self.compute_inverse_map:
return [self.rhs.rhs_advect_map_multiNC(x[0], u),self.rhs.rhs_lagrangian_evolve_map_multiNC(x[1], u)]
else:
return [self.rhs.rhs_advect_map_multiNC(x[0],u)]
class AdvectImage(ForwardModel):
"""
Forward model to advect an image using a transport equation: :math:`I_t + \\nabla I^Tv = 0`.
v is treated as an external argument and I is the state
"""
def __init__(self, sz, spacing, params=None):
super(AdvectImage, self).__init__(sz, spacing,params)
def u(self,t, pars, variables_from_optimizer=None):
"""
External input, to hold the velocity field
:param t: time (ignored; not time-dependent)
:param pars: assumes an n-D velocity field is passed as the only input argument
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: Simply returns this velocity field
"""
return pars['v']
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of transport equation: :math:`-\\nabla I^T v`
:param t: time (ignored; not time-dependent)
:param x: state, here the image, I, itself (supports multiple images and channels)
:param u: external input, will be the velocity field here
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [I]
"""
return [self.rhs.rhs_advect_image_multiNC(x[0],u)]
class EPDiffImage(ForwardModel):
"""
Forward model for the EPdiff equation. State is the momentum, m, and the image I:
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`I_t+\\nabla I^Tv=0`
"""
def __init__(self, sz, spacing, smoother, params=None):
super(EPDiffImage, self).__init__(sz, spacing,params)
self.smoother = smoother
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
:param t: time (ignored; not time-dependent)
:param x: state, here the vector momentum, m, and the image, I
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,I]
"""
# assume x[0] is m and x[1] is I for the state
m = x[0]
I = x[1]
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I': I}),variables_from_optimizer)
# print('max(|v|) = ' + str( v.abs().max() ))
return [self.rhs.rhs_epdiff_multiNC(m,v), self.rhs.rhs_advect_image_multiNC(I,v)]
class EPDiffMap(ForwardModel):
"""
Forward model for the EPDiff equation. State is the momentum, m, and the transform, :math:`\\phi`
(mapping the source image to the target image).
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`\\phi_t+D\\phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None,compute_inverse_map=False):
super(EPDiffMap, self).__init__(sz,spacing,params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
self.smoother = smoother
self.use_net = True if self.params['smoother']['type'] == 'adaptiveNet' else False
def debugging(self,input,t):
x = utils.checkNan(input)
if np.sum(x):
print("find nan at {} step".format(t))
print("flag m: {}, ".format(x[0]))
print("flag v: {},".format(x[1]))
print("flag phi: {},".format(x[2]))
print("flag new_m: {},".format(x[3]))
print("flag new_phi: {},".format(x[4]))
raise ValueError("nan error")
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm'
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the image, vector momentum, m, and the map, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,phi]
"""
# assume x[0] is m and x[1] is phi for the state
m = x[0]
m = m.clamp(max=1., min=-1.)
phi = x[1]
if self.compute_inverse_map:
phi_inv = x[2]
if not self.use_net:
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'phi':phi}),variables_from_optimizer)
else:
v = self.smoother.adaptive_smooth(m, phi, using_map=True)
# print('max(|v|) = ' + str( v.abs().max() ))
if self.compute_inverse_map:
ret_val= [self.rhs.rhs_epdiff_multiNC(m,v),
self.rhs.rhs_advect_map_multiNC(phi,v),
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]
else:
new_m = self.rhs.rhs_epdiff_multiNC(m,v)
new_phi = self.rhs.rhs_advect_map_multiNC(phi,v)
ret_val= [new_m, new_phi]
return ret_val
class EPDiffAdaptMap(ForwardModel):
"""
Forward model for the EPDiff equation. State is the momentum, m, and the transform, :math:`\\phi`
(mapping the source image to the target image).
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`\\phi_t+D\\phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False, update_sm_by_advect= True, update_sm_with_interpolation=True,compute_on_initial_map=True):
super(EPDiffAdaptMap, self).__init__(sz, spacing, params)
from . import module_parameters as pars
from . import smoother_factory as sf
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
self.smoother = smoother
self.update_sm_by_advect = update_sm_by_advect
self.use_the_first_step_penalty = True
self.update_sm_with_interpolation = update_sm_with_interpolation
self.compute_on_initial_map=compute_on_initial_map
self.update_sm_weight=None
self.velocity_mask = None
self.debug_mode_on = False
s_m_params = pars.ParameterDict()
s_m_params['smoother']['type'] = 'gaussian'
s_m_params['smoother']['gaussian_std'] =self.params['smoother']['deep_smoother']['deep_network_local_weight_smoothing']
self.embedded_smoother = sf.SmootherFactory(sz[2:], spacing).create_smoother(
            s_m_params)
        """If True, only the first-step penalty is used as the total penalty; otherwise the penalty is accumulated."""
def debug_nan(self, input, t,name=''):
x = utils.checkNan([input])
if np.sum(x):
# print(input[0])
print("find nan at {} step, {} with number {}".format(t,name,x[0]))
raise ValueError("nan error")
def init_zero_sm_weight(self,sm_weight):
self.update_sm_weight = torch.zeros_like(sm_weight).detach()
def init_velocity_mask(self,velocity_mask):
self.velocity_mask = velocity_mask
def debug_distrib(self,var,name):
var = var.detach().cpu().numpy()
density,_= np.histogram(var,[-100,-10,-1,0,1,10,100],density=True)
print("{} distri:{}".format(name,density))
def f(self, t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
        :math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-D\\phi v`
:param t: time (ignored; not time-dependent)
        :param x: state, here the vector momentum, m, the map, :math:`\\phi`, and (depending on the settings) the smoother weight map
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [m,phi]
"""
# assume x[0] is m and x[1] is phi for the state
m = x[0]
m=m.clamp(max=1., min=-1.)
phi = x[1]
return_val_name = []
sm_weight = None
if self.update_sm_by_advect:
if not self.update_sm_with_interpolation:
sm_weight_pre = x[2]
sm_weight = self.embedded_smoother.smooth(sm_weight_pre)
v, extra_ret = self.smoother.smooth(m, None, {'w':sm_weight},multi_output=True)
if self.velocity_mask is not None:
v = v* self.velocity_mask
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_weight_pre = self.rhs.rhs_advect_map_multiNC(sm_weight_pre, v)
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m, v, new_sm_weight_pre, extra_ret,
self.embedded_smoother)
ret_val = [new_m, new_phi,new_sm_weight_pre]
return_val_name =['new_m','new_phi','new_sm_weight']
else:
if self.compute_on_initial_map:
sm_weight = x[2]
sm_phi = x[3]
new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, sm_phi, self.spacing, 1,
zero_boundary=False)
pre_weight = sm_weight
new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)
#print('t{},m min, mean,max {} {} {}'.format(t,m.min().item(),m.mean().item(),m.max().item()))
v,extra_ret = self.smoother.smooth(m,None,{'w': new_sm_weight},multi_output=True)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_phi = self.rhs.rhs_advect_map_multiNC(sm_phi, v)
new_sm_weight = self.update_sm_weight.detach()
ret_val = [new_m, new_phi,new_sm_weight,new_sm_phi]
return_val_name = ['new_m', 'new_phi', 'new_sm_weight','new_sm_phi']
                else:  # TODO: note that this is the branch currently in use
sm_weight = x[2]
new_sm_weight = utils.compute_warped_image_multiNC(sm_weight, phi, self.spacing, 1,
zero_boundary=False)
pre_weight = sm_weight
new_sm_weight = self.embedded_smoother.smooth(new_sm_weight)
v, extra_ret = self.smoother.smooth(m, None,{'w':new_sm_weight}, multi_output=True)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_adapt_epdiff_wkw_multiNC(m,v,pre_weight,extra_ret,self.embedded_smoother)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
new_sm_weight = self.update_sm_weight.detach()
ret_val = [new_m, new_phi, new_sm_weight]
return_val_name = ['new_m', 'new_phi', 'new_sm_weight']
else:
if not t==0:
if self.use_the_first_step_penalty:
self.smoother.disable_penalty_computation()
else:
self.smoother.enable_accumulated_penalty()
I = utils.compute_warped_image_multiNC(pars['I0'], phi, self.spacing, 1,zero_boundary=True)
pars['I'] = I.detach() # TODO check whether I should be detached here
v = self.smoother.smooth(m, None, pars, variables_from_optimizer)
if self.velocity_mask is not None:
v = v * self.velocity_mask
new_m = self.rhs.rhs_epdiff_multiNC(m, v)
new_phi = self.rhs.rhs_advect_map_multiNC(phi, v)
ret_val = [new_m, new_phi]
return_val_name =['new_m','new_phi']
if self.debug_mode_on:
toshows = [m, v,phi]+ret_val if sm_weight is None else [m, v,phi]+ret_val +[sm_weight]
name = ['m', 'v','phi']+return_val_name if sm_weight is None else ['m', 'v','phi']+return_val_name +['sm_weight']
for i, toshow in enumerate(toshows):
print('t{},{} min, mean,max {} {} {}'.format(t, name[i], toshow.min().item(), toshow.mean().item(),
toshow.max().item()))
self.debug_distrib(toshow, name[i])
self.debug_nan(toshow,t,name[i])
return ret_val
# print('max(|v|) = ' + str( v.abs().max() ))
class EPDiffScalarMomentum(ForwardModel):
"""
Base class for scalar momentum EPDiff solutions. Defines a smoother that can be commonly used.
"""
def __init__(self, sz, spacing, smoother, params):
super(EPDiffScalarMomentum,self).__init__(sz,spacing,params)
self.smoother = smoother
class EPDiffScalarMomentumImage(EPDiffScalarMomentum):
"""
Forward model for the scalar momentum EPdiff equation. State is the scalar momentum, lam, and the image I
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
    :math:`m=\\lambda\\nabla I`
:math:`I_t+\\nabla I^Tv=0`
:math:`\\lambda_t + div(\\lambda v)=0`
"""
def __init__(self, sz, spacing, smoother, params=None):
super(EPDiffScalarMomentumImage, self).__init__(sz, spacing, smoother, params)
def f(self, t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
        :math:`-div(\\lambda v)`
:param t: time (ignored; not time-dependent)
:param x: state, here the scalar momentum, lam, and the image, I, itself
:param u: no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [lam,I]
"""
# assume x[0] is \lambda and x[1] is I for the state
lam = x[0]
I = x[1]
# now compute the momentum
m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)
# advection for I, scalar-conservation law for lam
return [self.rhs.rhs_scalar_conservation_multiNC(lam, v), self.rhs.rhs_advect_image_multiNC(I, v)]
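# Illustrative sketch (not part of the original code): the vector momentum used above is the scalar momentum
# multiplied by the image gradient, m = lambda * grad(I). A minimal, unbatched numpy version could look as
# follows; the actual helper (utils.compute_vector_momentum_from_scalar_momentum_multiNC) additionally handles
# the batch and channel dimensions as well as the spacing conventions.
def _vector_momentum_from_scalar_momentum_sketch(lam, I, spacing):
    import numpy as np
    grads = np.gradient(I, *spacing)  # one gradient array per spatial dimension
    if not isinstance(grads, list):   # np.gradient returns a single array for 1D inputs
        grads = [grads]
    return np.stack([lam * g for g in grads], axis=0)  # shape [dim, X, Y(, Z)]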
class EPDiffScalarMomentumMap(EPDiffScalarMomentum):
"""
Forward model for the scalar momentum EPDiff equation. State is the scalar momentum, lam, the image, I, and the transform, phi.
:math:`(m_1,...,m_d)^T_t = -(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`v=Km`
:math:`m=\\lambda\\nabla I`
:math:`I_t+\\nabla I^Tv=0`
:math:`\\lambda_t + div(\\lambda v)=0`
:math:`\\Phi_t+D\\Phi v=0`
"""
def __init__(self, sz, spacing, smoother, params=None, compute_inverse_map=False):
super(EPDiffScalarMomentumMap, self).__init__(sz,spacing, smoother, params)
self.compute_inverse_map = compute_inverse_map
"""If True then computes the inverse map on the fly for a map-based solution"""
def f(self,t, x, u, pars=None, variables_from_optimizer=None):
"""
Function to be integrated, i.e., right hand side of the EPDiff equation:
:math:`-(div(m_1v),...,div(m_dv))^T-(Dv)^Tm`
:math:`-\\nabla I^Tv`
:math:`-div(\\lambda v)`
:math:`-D\\Phi v`
:param t: time (ignored; not time-dependent)
:param x: state, here the scalar momentum, lam, the image, I, and the transform, :math:`\\phi`
:param u: ignored, no external input
:param pars: ignored (does not expect any additional inputs)
:param variables_from_optimizer: variables that can be passed from the optimizer
:return: right hand side [lam,I,phi]
"""
# assume x[0] is lam and x[1] is I and x[2] is phi for the state
lam = x[0]
I = x[1]
phi = x[2]
if self.compute_inverse_map:
phi_inv = x[3]
# now compute the momentum
m = utils.compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, self.sz, self.spacing)
# todo: replace this by phi again
#v = self.smoother.smooth(m,None,[phi,True],variables_from_optimizer)
v = self.smoother.smooth(m,None,utils.combine_dict(pars,{'I':I}),variables_from_optimizer)
if self.compute_inverse_map:
ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),
self.rhs.rhs_advect_image_multiNC(I,v),
self.rhs.rhs_advect_map_multiNC(phi,v),
self.rhs.rhs_lagrangian_evolve_map_multiNC(phi_inv,v)]
else:
ret_val = [self.rhs.rhs_scalar_conservation_multiNC(lam,v),
self.rhs.rhs_advect_image_multiNC(I,v),
self.rhs.rhs_advect_map_multiNC(phi,v)]
return ret_val
| [
"torch.empty_like",
"torch.zeros_like",
"torch.sum"
] | 1.7 | HastingsGreer/mermaid | bd13c5fc427eb8cd9054973a8eaaeb302078182d |
1.7 | """
This package enables easy single-scale and multi-scale optimization support.
"""
from __future__ import print_function
from __future__ import absolute_import
# from builtins import zip
# from builtins import str
# from builtins import range
# from builtins import object
from abc import ABCMeta, abstractmethod
import os
import time
import copy
from . import utils
from . import visualize_registration_results as vizReg
from . import custom_optimizers as CO
import numpy as np
import torch
from .data_wrapper import USE_CUDA, AdaptVal, MyTensor
from . import model_factory as MF
from . import image_sampling as IS
from .metrics import get_multi_metric
from .res_recorder import XlsxRecorder
from .data_utils import make_dir
from torch.utils.data import Dataset, DataLoader
from . import optimizer_data_loaders as OD
from . import fileio as FIO
from . import model_evaluation
from collections import defaultdict
from future.utils import with_metaclass
from termcolor import colored, cprint
# add some convenience functionality
class SimpleRegistration(with_metaclass(ABCMeta, object)):
"""
Abstract optimizer base class.
"""
def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):
"""
:param ISource: source image
:param ITarget: target image
:param spacing: image spacing
        :param sz: image size
        :param params: parameters
        :param compute_inverse_map: for map-based methods the inverse map can be computed on the fly
        :param default_learning_rate: default learning rate for the optimizer (if set, overrides the value from the settings)
"""
self.params = params
self.use_map = self.params['model']['deformation'][('use_map', True, '[True|False] either do computations via a map or directly using the image')]
        self.map_low_res_factor = self.params['model']['deformation'][('map_low_res_factor', 1.0, 'Set to a value in (0,1) if a map-based solution should be computed at a lower internal resolution (image matching is still at full resolution)')]
self.spacing = spacing
self.ISource = ISource
self.ITarget = ITarget
self.sz = sz
self.compute_inverse_map = compute_inverse_map
self.default_learning_rate=default_learning_rate
self.optimizer = None
def get_history(self):
"""
        Returns the optimization history as a dictionary. Keeps track of energies, iteration counts, and additional custom measures.
:return: history dictionary
"""
if self.optimizer is not None:
return self.optimizer.get_history()
else:
return None
def write_parameters_to_settings(self):
"""
Allows currently computed parameters (if they were optimized) to be written back to an output parameter file
:return:
"""
if self.optimizer is not None:
self.optimizer.write_parameters_to_settings()
@abstractmethod
def register(self):
"""
Abstract method to register the source to the target image
:return:
"""
pass
def get_optimizer(self):
"""
Returns the optimizer being used (can be used to customize the simple registration if desired)
:return: optimizer
"""
return self.optimizer
def get_energy(self):
"""
Returns the current energy
:return: Returns a tuple (energy, similarity energy, regularization energy)
"""
if self.optimizer is not None:
return self.optimizer.get_energy()
else:
return None
def get_warped_label(self):
"""
Returns the warped label
:return: the warped label
"""
if self.optimizer is not None:
return self.optimizer.get_warped_label()
else:
return None
def get_warped_image(self):
"""
Returns the warped image
:return: the warped image
"""
if self.optimizer is not None:
return self.optimizer.get_warped_image()
else:
return None
def set_initial_map(self,map0,initial_inverse_map=None):
"""
Sets the initial map for the registrations; by default (w/o setting anything) this will be the identity
map, but by setting it to a different initial condition one can concatenate transformations.
:param map0:
:return: n/a
"""
if self.optimizer is not None:
self.optimizer.set_initial_map(map0, initial_inverse_map)
# self.optimizer.set_initial_inverse_map(initial_inverse_map)
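        # Example (sketch, not from the original code): to concatenate this registration with a previously
        # computed transform phi_prev, pass it as the initial condition, e.g.
        #   reg.set_initial_map(phi_prev)
        #   reg.register()   # the resulting map then already includes phi_prev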
    def set_weight_map(self,weight_map):
        """
        Sets the initial weight map (for models that support an adaptive/weighted regularizer)
        :param weight_map: initial weight map
        :return: n/a
        """
        if self.optimizer is not None:
            self.optimizer.set_initial_weight_map(weight_map)
def get_initial_map(self):
"""
Returns the initial map; this will typically be the identity map, but can be set to a different initial
condition using set_initial_map
:return: returns the initial map (if applicable)
"""
if self.optimizer is not None:
return self.optimizer.get_initial_map()
else:
return None
def get_initial_inverse_map(self):
"""
Returns the initial inverse map; this will typically be the identity map, but can be set to a different initial
condition using set_initial_map
:return: returns the initial map (if applicable)
"""
if self.optimizer is not None:
return self.optimizer.get_initial_inverse_map()
else:
return None
def get_map(self):
"""
Returns the deformation map
:return: deformation map
"""
if self.optimizer is not None:
return self.optimizer.get_map()
def get_inverse_map(self):
"""
Returns the inverse deformation map if available
:return: deformation map
"""
if self.optimizer is not None:
return self.optimizer.get_inverse_map()
def get_model_parameters(self):
"""
Returns the parameters of the model
:return: model parameters
"""
return self.optimizer.get_model_parameters()
def set_model_parameters(self,p):
"""
Sets the parameters of a model
:param p: model parameters
:return:
"""
self.optimizer.set_model_parameters(p)
def get_model_state_dict(self):
"""
        Returns the state dictionary of the model
:return: state dictionary
"""
return self.optimizer.get_model_state_dict()
def set_model_state_dict(self,sd):
"""
Sets the state dictionary of the model
:param sd: state dictionary
:return:
"""
self.optimizer.set_model_state_dict(sd)
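# Illustrative usage sketch (not part of the original file): one plausible way to drive the simple
# registration interface defined above. Images, spacing, size, and the params object are assumed to be
# set up elsewhere (e.g., via the library's parameter/settings files); the model name below is just an
# example from the list of supported models. This function is only a sketch and is not called in this module.
def _simple_registration_usage_sketch(ISource, ITarget, spacing, sz, params, model_name='lddmm_shooting_map'):
    reg = SimpleSingleScaleRegistration(ISource, ITarget, spacing, sz, params)
    reg.get_optimizer().set_model(model_name)   # select the registration model by name
    reg.register()                              # run the optimization
    return reg.get_map(), reg.get_warped_image(), reg.get_energy()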
class SimpleSingleScaleRegistration(SimpleRegistration):
"""
Simple single scale registration
"""
def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):
super(SimpleSingleScaleRegistration, self).__init__(ISource,ITarget,spacing,sz,params,compute_inverse_map=compute_inverse_map,default_learning_rate=default_learning_rate)
self.optimizer = SingleScaleRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
def register(self):
"""
Registers the source to the target image
:return: n/a
"""
self.optimizer.register(self.ISource, self.ITarget)
class SimpleSingleScaleConsensusRegistration(SimpleRegistration):
"""
Single scale registration making use of consensus optimization (to allow for multiple independent registration
that can share parameters).
"""
def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):
super(SimpleSingleScaleConsensusRegistration, self).__init__(ISource,ITarget,spacing,sz,params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
self.optimizer = SingleScaleConsensusRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
def register(self):
"""
Registers the source to the target image
:return: n/a
"""
self.optimizer.register(self.ISource, self.ITarget)
class SimpleSingleScaleBatchRegistration(SimpleRegistration):
"""
Single scale registration making use of batch optimization (to allow optimizing over many or large images).
"""
def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):
super(SimpleSingleScaleBatchRegistration, self).__init__(ISource,ITarget,spacing,sz,params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
self.optimizer = SingleScaleBatchRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map,default_learning_rate=default_learning_rate)
def register(self):
"""
Registers the source to the target image
:return: n/a
"""
self.optimizer.register(self.ISource, self.ITarget)
class SimpleMultiScaleRegistration(SimpleRegistration):
"""
Simple multi scale registration
"""
def __init__(self,ISource,ITarget,spacing,sz,params,compute_inverse_map=False, default_learning_rate=None):
super(SimpleMultiScaleRegistration, self).__init__(ISource, ITarget, spacing,sz,params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
self.optimizer = MultiScaleRegistrationOptimizer(self.sz,self.spacing,self.use_map,self.map_low_res_factor,self.params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
def register(self):
"""
Registers the source to the target image
:return: n/a
"""
self.optimizer.register(self.ISource,self.ITarget)
class Optimizer(with_metaclass(ABCMeta, object)):
"""
Abstract optimizer base class.
"""
def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):
"""
Constructor.
:param sz: image size in BxCxXxYxZ format
:param spacing: spatial spacing, e.g., [0.1,0.1,0.1] in 3D
:param useMap: boolean, True if a coordinate map is evolved to warp images, False otherwise
        :param mapLowResFactor: if <1 evolutions happen at a lower resolution; >=1 ignored
:param params: ParametersDict() instance to hold parameters
:param compute_inverse_map: for map-based models the inverse map can be computed on the fly
"""
self.sz = sz
"""image size"""
self.spacing = spacing
"""image spacing"""
self.lowResSize = None
"""low res image size"""
self.lowResSpacing = None
"""low res image spacing"""
self.useMap = useMap
"""makes use of map"""
self.mapLowResFactor = mapLowResFactor
"""if <1 then evolutions are at a lower resolution, but image is compared at the same resolution; >=1 ignored"""
if self.mapLowResFactor is not None:
if self.mapLowResFactor>1:
print('mapLowResFactor needs to be <=1 but is set to ' + str( self.mapLowResFactor ) + '; ignoring it')
self.mapLowResFactor = None
elif self.mapLowResFactor==1:
print('mapLowResFactor = 1: performing computations at original resolution.')
self.mapLowResFactor = None
self.compute_inverse_map = compute_inverse_map
"""If set to True the inverse map is computed on the fly for map-based models"""
self.default_learning_rate = default_learning_rate
"""If set, this will be the learning rate that the optimizers used (otherwise, as specified in the json configuration, via params)"""
self.params = params
"""general parameters"""
self.rel_ftol = 1e-4
"""relative termination tolerance for optimizer"""
self.last_successful_step_size_taken = None
"""Records the last successful step size an optimizer took (possible use: propogate step size between multiscale levels"""
self.external_optimizer_parameter_loss = None
if (self.mapLowResFactor is not None):
self.lowResSize = utils._get_low_res_size_from_size( sz, self.mapLowResFactor )
self.lowResSpacing = utils._get_low_res_spacing_from_spacing(self.spacing,sz,self.lowResSize)
self.sampler = IS.ResampleImage()
self.params[('optimizer', {}, 'optimizer settings')]
self.params[('model', {}, 'general model settings')]
self.params['model'][('deformation', {}, 'model describing the desired deformation model')]
self.params['model'][('registration_model', {}, 'general settings for the registration model')]
self.params['model']['deformation']['use_map']= (useMap, '[True|False] either do computations via a map or directly using the image')
        self.params['model']['deformation']['map_low_res_factor'] = (mapLowResFactor, 'Set to a value in (0,1) if a map-based solution should be computed at a lower internal resolution (image matching is still at full resolution)')
self.compute_similarity_measure_at_low_res = self.params['model']['deformation'][('compute_similarity_measure_at_low_res',False,'If set to true map is not upsampled and the entire computations proceeds at low res')]
self.rel_ftol = self.params['optimizer']['single_scale'][('rel_ftol',self.rel_ftol,'relative termination tolerance for optimizer')]
self.spline_order = params['model']['registration_model'][('spline_order', 1, 'Spline interpolation order; 1 is linear interpolation (default); 3 is cubic spline')]
"""order of the spline for interpolations"""
self.show_iteration_output = True
self.history = dict()
self.optimizer_has_been_initialized = False
"""
Needs to be set before the actual optimization commences; allows to keep track if all parameters have been set
and for example to delay external parameter settings
"""
def write_parameters_to_settings(self):
"""
Writes current state of optimized parameters back to the json setting file (for example to keep track of optimized weights)
:return:
"""
pass
def turn_iteration_output_on(self):
self.show_iteration_output = True
def turn_iteration_output_off(self):
self.show_iteration_output = False
def get_history(self):
"""
        Returns the optimization history as a dictionary. Keeps track of energies, iteration counts, and additional custom measures.
:return: history dictionary
"""
return self.history
def _add_to_history(self,key,value):
"""
Adds an element to the optimizer history
:param key: history key
:param value: value that is associated with it
:return: n/a
"""
if key not in self.history:
self.history[key] = [value]
else:
self.history[key].append(value)
def set_last_successful_step_size_taken(self,lr):
"""
Function to let the optimizer know what step size has been successful previously.
Useful for example to retain optimization "memory" across scales in a multi-scale implementation
:param lr: step size
:return: n/a
"""
self.last_successful_step_size_taken=lr
def get_last_successful_step_size_taken(self):
"""
Returns the last successful step size the optimizer has taken (if the optimizer supports querying the step size)
:return: last successful step size
"""
return self.last_successful_step_size_taken
def set_rel_ftol(self, rel_ftol):
"""Sets the relative termination tolerance: :math:`|f(x_i)-f(x_{i-1})|/f(x_i)<tol`
:param rel_ftol: relative termination tolerance for optimizer
"""
self.rel_ftol = rel_ftol
self.params['optimizer']['single_scale']['rel_ftol'] = (rel_ftol,'relative termination tolerance for optimizer')
self.rel_ftol = self.params['optimizer']['single_scale']['rel_ftol']
def get_rel_ftol(self):
"""
Returns the optimizer termination tolerance
"""
return self.rel_ftol
@abstractmethod
def set_model(self, modelName):
"""
Abstract method to select the model which should be optimized by name
:param modelName: name (string) of the model that should be solved
"""
pass
@abstractmethod
def optimize(self):
"""
Abstract method to start the optimization
"""
pass
def get_checkpoint_dict(self):
"""
Returns a dict() object containing the information for the current checkpoint.
        :return: checkpoint dictionary
"""
return dict()
def load_checkpoint_dict(self,d,load_optimizer_state=False):
"""
Takes the dictionary from a checkpoint and loads it as the current state of optimizer and model
:param d: dictionary
:param load_optimizer_state: if set to True the optimizer state will be restored
:return: n/a
"""
pass
def save_checkpoint(self,filename):
torch.save(self.get_checkpoint_dict(),filename)
def load_checkpoint(self,filename):
d = torch.load(filename)
self.load_checkpoint_dict(d)
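    # Example (sketch): opt.save_checkpoint('registration_checkpoint.pt') stores the current model parameters
    # (and optimizer state) via get_checkpoint_dict(); opt.load_checkpoint('registration_checkpoint.pt')
    # restores them later. The filename is purely illustrative.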
def set_external_optimizer_parameter_loss(self,opt_parameter_loss):
"""
Allows to set an external method as an optimizer parameter loss
        :param opt_parameter_loss: method which takes shared_model_parameters as its only input and returns a scalar loss value
        :return: n/a
"""
self.external_optimizer_parameter_loss = opt_parameter_loss
def get_external_optimizer_parameter_loss(self):
"""
Returns the externally set method for parameter loss. Will be None if none was set.
:return: method
"""
return self.external_optimizer_parameter_loss
def compute_optimizer_parameter_loss(self,shared_model_parameters):
"""
Returns the optimizer parameter loss. This is the method that should be called to compute this loss.
Will either evaluate the method optimizer_parameter_loss or if one was externally defined, the
externally defined one will have priority.
        :param shared_model_parameters: parameters that have been declared shared in a model
:return: parameter loss
"""
if self.external_optimizer_parameter_loss is not None:
return self.external_optimizer_parameter_loss(shared_model_parameters)
else:
return self.optimizer_parameter_loss(shared_model_parameters)
def optimizer_parameter_loss(self,shared_model_parameters):
"""
This allows to define additional terms for the loss which are based on parameters that are shared
between models (for example for the smoother). Can be used to define a form of consensus optimization.
        :param shared_model_parameters: parameters that have been declared shared in a model
:return: 0 by default, otherwise the corresponding penalty
"""
return MyTensor(1).zero_()
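# Illustrative sketch (not part of the original file): an externally defined optimizer parameter loss, as it
# could be passed to set_external_optimizer_parameter_loss() above. It penalizes the squared deviation of the
# shared model parameters from a given consensus solution; the consensus dictionary, the weight, and the
# function name are assumptions made purely for illustration.
def _make_l2_consensus_parameter_loss_sketch(consensus_parameters, weight=1.0):
    def loss(shared_model_parameters):
        total = MyTensor(1).zero_()
        for key in shared_model_parameters:
            if key in consensus_parameters:
                total = total + weight * ((shared_model_parameters[key] - consensus_parameters[key]) ** 2).sum()
        return total
    return loss
# Possible use (sketch): optimizer.set_external_optimizer_parameter_loss(_make_l2_consensus_parameter_loss_sketch(c))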
class ImageRegistrationOptimizer(Optimizer):
"""
Optimization class for image registration.
"""
def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):
super(ImageRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
self.ISource = None
"""source image"""
self.lowResISource = None
"""if mapLowResFactor <1, a lowres soure image needs to be created to parameterize some of the registration algorithms"""
self.lowResITarget = None
"""if mapLowResFactor <1, a lowres target image may need to be created to be used as additonal inputs for registration algorithms"""
self.ITarget = None
"""target image"""
self.LSource = None
""" source label """
self.LTarget = None
""" target label """
self.lowResLSource = None
"""if mapLowResFactor <1, a lowres soure label image needs to be created to parameterize some of the registration algorithms"""
self.lowResLTarget = None
"""if mapLowResFactor <1, a lowres target label image needs to be created to parameterize some of the registration algorithms"""
self.initialMap = None
""" initial map"""
self.initialInverseMap = None
""" initial inverse map"""
self.weight_map =None
""" initial weight map"""
self.multi_scale_info_dic = None
""" dicts containing full resolution image and label"""
        self.optimizer_name = None  # e.g. 'lbfgs_ls'
"""name of the optimizer to use"""
self.optimizer_params = {}
"""parameters that should be passed to the optimizer"""
self.optimizer = None
"""optimizer object itself (to be instantiated)"""
self.visualize = True
"""if True figures are created during the run"""
self.visualize_step = 10
"""how often the figures are updated; each self.visualize_step-th iteration"""
self.nrOfIterations = None
"""the maximum number of iterations for the optimizer"""
self.current_epoch = None
"""Can be set externally, so the optimizer knows in which epoch we are"""
self.save_fig=False
""" save fig during the visualization"""
self.save_fig_path=None
""" the path for saving figures"""
self.save_fig_num =-1
""" the max num of the fig to be saved during one call, set -1 to save all"""
self.pair_name=None
""" name list of the registration pair """
self.iter_count = 0
""" count of the iterations over multi-resolution"""
self.recording_step = None
"""sets the step-size for recording all intermediate results to the history"""
def set_recording_step(self, step):
assert step > 0, 'Recording step needs to be larger than 0'
self.recording_step = step
self.history['recording'] = []
def set_current_epoch(self,current_epoch):
self.current_epoch = current_epoch
def get_current_epoch(self):
return self.current_epoch
def turn_visualization_on(self):
"""
Turns on visualization during the run
"""
self.visualize = True
def turn_visualization_off(self):
"""
Turns off visualization during the run
"""
self.visualize = False
def set_visualization(self, vis):
"""
Set if visualization should be on (True) or off (False)
:param vis: visualization status on (True) or off (False)
"""
self.visualize = vis
def get_visualization(self):
"""
Returns the visualization status
:return: Returns True if visualizations will be displayed and False otherwise
"""
return self.visualize
def set_visualize_step(self, nr_step):
"""
Set after how many steps a visualization should be updated
:param nr_step:
"""
self.visualize_step = nr_step
def get_visualize_step(self):
"""
Returns after how many steps visualizations are updated
:return: after how many steps visualizations are updated
"""
return self.visualize_step
def set_save_fig(self,save_fig):
"""
:param save_fig: True: save the visualized figs
:return:
"""
self.save_fig = save_fig
def get_save_fig(self):
"""
        :return: True if the visualized figures will be saved
"""
return self.save_fig
def set_save_fig_path(self, save_fig_path):
"""
the path of saved figures, default is the ../data/expr_name
:param save_fig_path:
:return:
"""
self.save_fig_path = save_fig_path
def get_save_fig_path(self):
"""
the path of saved figures, default is the ../data/expr_name
        :return: the path where figures are saved
"""
return self.save_fig_path
def set_save_fig_num(self, save_fig_num=-1):
"""
set the num of the fig to save
:param save_fig_num:
:return:
"""
self.save_fig_num = save_fig_num
def get_save_fig_num(self):
"""
        get the maximum number of figures to be saved
        :return: the maximum number of figures to save (-1 means save all)
"""
return self.save_fig_num
def set_expr_name(self, expr_name):
"""
        the name of the experiment
:param expr_name:
:return:
"""
self.expr_name = expr_name
def get_expr_name(self):
"""
the name of experiments
:param expr_name:
:return:
"""
return self.expr_name
def set_pair_name(self, pair_name):
self.pair_name = pair_name
def get_pair_name(self):
return self.pair_name
def register(self, ISource, ITarget):
"""
Registers the source to the target image
:param ISource: source image
:param ITarget: target image
:return: n/a
"""
self.set_source_image(ISource)
self.set_target_image(ITarget)
self.optimize()
self.write_parameters_to_settings()
def set_source_image(self, I):
"""
Setting the source image which should be deformed to match the target image
:param I: source image
"""
self.ISource = I
def set_multi_scale_info(self, ISource, ITarget, spacing, LSource=None, LTarget=None):
"""provide full resolution of Image and Label"""
self.multi_scale_info_dic = {'ISource': ISource, 'ITarget': ITarget, 'spacing': spacing, 'LSource': LSource,
'LTarget': LTarget}
def _compute_low_res_image(self,I,params,spacing=None):
low_res_image = None
if self.mapLowResFactor is not None:
low_res_image,_ = self.sampler.downsample_image_to_size(I,spacing,self.lowResSize[2::],self.spline_order)
return low_res_image
def _compute_low_res_label_map(self,label_map,params, spacing=None):
low_res_label_map = None
if self.mapLowResFactor is not None:
            low_res_label_map, _ = self.sampler.downsample_image_to_size(label_map, spacing, self.lowResSize[2::], 0)
return low_res_label_map
def compute_low_res_image_if_needed(self):
"""To be called before the optimization starts"""
if self.multi_scale_info_dic is None:
ISource = self.ISource
ITarget = self.ITarget
LSource = self.LSource
LTarget = self.LTarget
spacing = self.spacing
else:
ISource, ITarget, LSource, LTarget, spacing = self.multi_scale_info_dic['ISource'], self.multi_scale_info_dic['ITarget'],\
self.multi_scale_info_dic['LSource'],self.multi_scale_info_dic['LTarget'],self.multi_scale_info_dic['spacing']
if self.mapLowResFactor is not None:
self.lowResISource = self._compute_low_res_image(ISource,self.params,spacing)
# todo: can be removed to save memory; is more experimental at this point
self.lowResITarget = self._compute_low_res_image(ITarget,self.params,spacing)
if self.LSource is not None and self.LTarget is not None:
self.lowResLSource = self._compute_low_res_label_map(LSource,self.params,spacing)
self.lowResLTarget = self._compute_low_res_label_map(LTarget, self.params,spacing)
def set_source_label(self, LSource):
"""
:param LSource:
:return:
"""
self.LSource = LSource
def set_target_label(self, LTarget):
"""
:param LTarget:
:return:
"""
self.LTarget = LTarget
def get_source_label(self):
return self.LSource
def get_target_label(self):
return self.LTarget
def set_target_image(self, I):
"""
Setting the target image which the source image should match after registration
:param I: target image
"""
self.ITarget = I
def set_optimizer_by_name(self, optimizer_name):
"""
Set the desired optimizer by name (only lbfgs and adam are currently supported)
:param optimizer_name: name of the optimizer (string) to be used
"""
self.optimizer_name = optimizer_name
self.params['optimizer']['name'] = optimizer_name
def get_optimizer_by_name(self):
"""
Get the name (string) of the optimizer that was selected
:return: name (string) of the optimizer
"""
return self.optimizer_name
def set_optimizer(self, opt):
"""
Set the optimizer. Not by name, but instead by passing the optimizer object which should be instantiated
:param opt: optimizer object
"""
self.optimizer = opt
def get_optimizer(self):
"""
Returns the optimizer object which was set to perform the optimization
:return: optimizer object
"""
return self.optimizer
def set_optimizer_params(self, opt_params):
"""
Set the desired parameters of the optimizer. This is done by passing a dictionary, for example, dict(lr=0.01)
:param opt_params: dictionary holding the parameters of an optimizer
"""
self.optimizer_params = opt_params
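# Illustrative configuration sketch (not part of the original file): how the setter methods above could be
# combined to configure an image registration optimizer before registering; the learning rate and the number
# of visualization steps are example values only.
def _configure_image_registration_optimizer_sketch(opt, lr=0.01, nr_of_visualization_steps=20):
    opt.set_optimizer_by_name('adam')                   # 'lbfgs_ls' and 'adam' are the names mentioned above
    opt.set_optimizer_params(dict(lr=lr))               # parameters that are passed through to the optimizer
    opt.set_visualization(True)
    opt.set_visualize_step(nr_of_visualization_steps)   # update figures every N iterations
    return opt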
class SingleScaleRegistrationOptimizer(ImageRegistrationOptimizer):
"""
Optimizer operating on a single scale. Typically this will be the full image resolution.
.. todo::
        Check what the best way is to adapt the tolerances for the pre-defined optimizers;
        tying them to rel_ftol is not really correct.
"""
def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):
super(SingleScaleRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params,compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
if self.mapLowResFactor is not None:
# computes model at a lower resolution than the image similarity
if self.compute_similarity_measure_at_low_res:
self.mf = MF.ModelFactory(self.lowResSize, self.lowResSpacing, self.lowResSize, self.lowResSpacing )
else:
self.mf = MF.ModelFactory(self.sz, self.spacing, self.lowResSize, self.lowResSpacing )
else:
# computes model and similarity at the same resolution
self.mf = MF.ModelFactory(self.sz, self.spacing, self.sz, self.spacing)
"""model factory which will be used to create the model and its loss function"""
self.model = None
"""the model itself"""
self.criterion = None
"""the loss function"""
self.initialMap = None
"""initial map, will be needed for map-based solutions; by default this will be the identity map, but can be set to something different externally"""
self.initialInverseMap = None
"""initial inverse map; will be the same as the initial map, unless it was set externally"""
self.map0_inverse_external = None
"""initial inverse map, set externally, will be needed for map-based solutions; by default this will be the identity map, but can be set to something different externally"""
self.map0_external = None
"""intial map, set externally"""
self.lowResInitialMap = None
"""low res initial map, by default the identity map, will be needed for map-based solutions which are computed at lower resolution"""
self.lowResInitialInverseMap = None
"""low res initial inverse map, by default the identity map, will be needed for map-based solutions which are computed at lower resolution"""
self.weight_map =None
"""init_weight map, which only used by metric learning models"""
self.optimizer_instance = None
"""the optimizer instance to perform the actual optimization"""
c_params = self.params[('optimizer', {}, 'optimizer settings')]
self.weight_clipping_type = c_params[('weight_clipping_type','none','Type of weight clipping that should be used [l1|l2|l1_individual|l2_individual|l1_shared|l2_shared|None]')]
self.weight_clipping_type = self.weight_clipping_type.lower()
"""Type of weight clipping; applied to weights and bias indepdenendtly; norm restricted to weight_clipping_value"""
if self.weight_clipping_type=='none':
self.weight_clipping_type = None
if self.weight_clipping_type!='pre_lsm_weights':
self.weight_clipping_value = c_params[('weight_clipping_value', 1.0, 'Value to which the norm is being clipped')]
"""Desired norm after clipping"""
extent = self.spacing * self.sz[2:]
max_extent = max(extent)
clip_params = c_params[('gradient_clipping',{},'clipping settings for the gradient for optimization')]
self.clip_display = clip_params[('clip_display', True, 'If set to True displays if clipping occurred')]
self.clip_individual_gradient = clip_params[('clip_individual_gradient',False,'If set to True, the gradient for the individual parameters will be clipped')]
self.clip_individual_gradient_value = clip_params[('clip_individual_gradient_value',max_extent,'Value to which the gradient for the individual parameters is clipped')]
        self.clip_shared_gradient = clip_params[('clip_shared_gradient', True, 'If set to True, the gradient for the shared parameters will be clipped')]  # TODO: keep the gradient clipping enabled; turning it off may cause instability
self.clip_shared_gradient_value = clip_params[('clip_shared_gradient_value', 1.0, 'Value to which the gradient for the shared parameters is clipped')]
self.scheduler = None # for the step size scheduler
self.patience = None # for the step size scheduler
self._use_external_scheduler = False
self.rec_energy = None
self.rec_similarityEnergy = None
self.rec_regEnergy = None
self.rec_opt_par_loss_energy = None
self.rec_phiWarped = None
self.rec_phiInverseWarped = None
self.rec_IWarped = None
self.last_energy = None
self.rel_f = None
self.rec_custom_optimizer_output_string = ''
"""the evaluation information"""
self.rec_custom_optimizer_output_values = None
self.delayed_model_parameters = None
self.delayed_model_parameters_still_to_be_set = False
self.delayed_model_state_dict = None
self.delayed_model_state_dict_still_to_be_set = False
# to be able to transfer state and parameters
self._sgd_par_list = None # holds the list of parameters
self._sgd_par_names = None # holds the list of names associated with these parameters
self._sgd_name_to_model_par = None # allows mapping from name to model parameter
self._sgd_split_shared = None # keeps track if the shared states were split or not
self._sgd_split_individual = None # keeps track if the individual states were split or not
self.over_scale_iter_count = None #accumulated iter count over different scales
        self.n_scale = None  # the index of the current scale; TODO: rename and document
def write_parameters_to_settings(self):
if self.model is not None:
self.model.write_parameters_to_settings()
def get_sgd_split_shared(self):
return self._sgd_split_shared
def get_sgd_split_indvidual(self):
return self._sgd_split_individual
def get_checkpoint_dict(self):
if self.model is not None and self.optimizer_instance is not None:
d = super(SingleScaleRegistrationOptimizer, self).get_checkpoint_dict()
d['model'] = dict()
d['model']['parameters'] = self.model.get_registration_parameters_and_buffers()
d['model']['size'] = self.model.sz
d['model']['spacing'] = self.model.spacing
d['optimizer_state'] = self.optimizer_instance.state_dict()
return d
else:
raise ValueError('Unable to create checkpoint, because either the model or the optimizer have not been initialized')
def load_checkpoint_dict(self,d,load_optimizer_state=False):
if self.model is not None and self.optimizer_instance is not None:
self.model.set_registration_parameters(d['model']['parameters'],d['model']['size'],d['model']['spacing'])
if load_optimizer_state:
try:
self.optimizer_instance.load_state_dict(d['optimizer_state'])
                    print('INFO: Was able to load the previous optimizer state from checkpoint data')
except:
print('INFO: Could not load the previous optimizer state')
else:
print('WARNING: Turned off the loading of the optimizer state')
else:
raise ValueError('Cannot load checkpoint dictionary, because either the model or the optimizer have not been initialized')
def get_opt_par_energy(self):
"""
Energy for optimizer parameters
:return:
"""
return self.rec_opt_par_loss_energy.cpu().item()
def get_custom_output_values(self):
"""
Custom output values
:return:
"""
return self.rec_custom_optimizer_output_values
def get_energy(self):
"""
Returns the current energy
:return: Returns a tuple (energy, similarity energy, regularization energy)
"""
return self.rec_energy.cpu().item(), self.rec_similarityEnergy.cpu().item(), self.rec_regEnergy.cpu().item()
def get_warped_image(self):
"""
Returns the warped image
:return: the warped image
"""
if self.useMap:
cmap = self.get_map()
# and now warp it
return utils.compute_warped_image_multiNC(self.ISource, cmap, self.spacing, self.spline_order,zero_boundary=True)
else:
return self.rec_IWarped
def get_warped_label(self):
"""
Returns the warped label
:return: the warped label
"""
if self.useMap:
cmap = self.get_map()
return utils.get_warped_label_map(self.LSource, cmap, self.spacing)
else:
return None
def get_map(self):
"""
Returns the deformation map
:return: deformation map
"""
return self.rec_phiWarped
def get_inverse_map(self):
"""
        Returns the inverse deformation map
        :return: inverse deformation map
"""
return self.rec_phiInverseWarped
def set_n_scale(self, n_scale):
"""
        sets the index of the current scale (for multi-scale solutions)
        :param n_scale: scale index
        :return:
"""
self.n_scale = n_scale
def set_over_scale_iter_count(self, iter_count):
self.over_scale_iter_count = iter_count
def _create_initial_maps(self):
if self.useMap:
# create the identity map [-1,1]^d, since we will use a map-based implementation
if self.map0_external is not None:
self.initialMap = self.map0_external
else:
id = utils.identity_map_multiN(self.sz, self.spacing)
self.initialMap = AdaptVal(torch.from_numpy(id))
if self.map0_inverse_external is not None:
self.initialInverseMap = self.map0_inverse_external
else:
id =utils.identity_map_multiN(self.sz, self.spacing)
self.initialInverseMap = AdaptVal(torch.from_numpy(id))
if self.mapLowResFactor is not None:
# create a lower resolution map for the computations
if self.map0_external is None:
lowres_id = utils.identity_map_multiN(self.lowResSize, self.lowResSpacing)
self.lowResInitialMap = AdaptVal(torch.from_numpy(lowres_id))
else:
sampler = IS.ResampleImage()
lowres_id, _ = sampler.downsample_image_to_size(self.initialMap , self.spacing,self.lowResSize[2::] , 1,zero_boundary=False)
self.lowResInitialMap = AdaptVal(lowres_id)
if self.map0_inverse_external is None:
lowres_id = utils.identity_map_multiN(self.lowResSize, self.lowResSpacing)
self.lowResInitialInverseMap = AdaptVal(torch.from_numpy(lowres_id))
else:
sampler = IS.ResampleImage()
lowres_inverse_id, _ = sampler.downsample_image_to_size(self.initialInverseMap, self.spacing, self.lowResSize[2::],
1, zero_boundary=False)
self.lowResInitialInverseMap = AdaptVal(lowres_inverse_id)
def set_model(self, modelName):
"""
Sets the model that should be solved
:param modelName: name of the model that should be solved (string)
"""
self.params['model']['registration_model']['type'] = ( modelName, "['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with '_map' or '_image' suffix" )
self.model, self.criterion = self.mf.create_registration_model(modelName, self.params['model'],compute_inverse_map=self.compute_inverse_map)
print(self.model)
self._create_initial_maps()
def set_initial_map(self,map0,map0_inverse=None):
"""
Sets the initial map (overwrites the default identity map)
        :param map0: initial map
:param map0_inverse: initial inverse map
:return: n/a
"""
self.map0_external = map0
self.map0_inverse_external = map0_inverse
if self.initialMap is not None:
# was already set, so let's modify it
self._create_initial_maps()
def set_initial_weight_map(self,weight_map,freeze_weight=False):
"""
        Sets the initial weight map (for models with an adaptive/weighted regularizer)
        :param weight_map: initial weight map
        :param freeze_weight: if True, the weight map is frozen during the optimization
:return: n/a
"""
if self.mapLowResFactor is not None:
sampler = IS.ResampleImage()
weight_map, _ = sampler.downsample_image_to_size(weight_map, self.spacing, self.lowResSize[2::], 1,
zero_boundary=False)
self.model.local_weights.data = weight_map
if freeze_weight:
self.model.freeze_adaptive_regularizer_param()
def get_initial_map(self):
"""
Returns the initial map
:return: initial map
"""
if self.initialMap is not None:
return self.initialMap
elif self.map0_external is not None:
return self.map0_external
else:
return None
def get_initial_inverse_map(self):
"""
Returns the initial inverse map
:return: initial inverse map
"""
if self.initialInverseMap is not None:
return self.initialInverseMap
elif self.map0_inverse_external is not None:
return self.map0_inverse_external
else:
return None
def add_similarity_measure(self, sim_name, sim_measure):
"""
Adds a custom similarity measure.
:param sim_name: name of the similarity measure (string)
:param sim_measure: similarity measure itself (class object that can be instantiated)
"""
self.criterion.add_similarity_measure(sim_name, sim_measure)
        self.params['model']['registration_model']['similarity_measure']['type'] = (sim_name, 'was customized; needs to be explicitly instantiated, cannot be loaded')
def add_model(self, model_name, model_network_class, model_loss_class, use_map, model_description='custom model'):
"""
Adds a custom model and its loss function
:param model_name: name of the model to be added (string)
:param model_network_class: registration model itself (class object that can be instantiated)
:param model_loss_class: registration loss (class object that can be instantiated)
:param use_map: True/False: specifies if model uses a map or not
:param model_description: optional model description
"""
self.mf.add_model(model_name, model_network_class, model_loss_class, use_map, model_description)
self.params['model']['registration_model']['type'] = (model_name, 'was customized; needs to be explicitly instantiated, cannot be loaded')
def set_model_state_dict(self,sd):
"""
Sets the state dictionary of the model
:param sd: state dictionary
:return: n/a
"""
if self.optimizer_has_been_initialized:
self.model.load_state_dict(sd)
self.delayed_model_state_dict_still_to_be_set = False
else:
self.delayed_model_state_dict_still_to_be_set = True
self.delayed_model_state_dict = sd
def get_model_state_dict(self):
"""
Returns the state dictionary of the model
:return: state dictionary
"""
return self.model.state_dict()
def set_model_parameters(self, p):
"""
Set the parameters of the registration model
:param p: parameters
"""
if self.optimizer_has_been_initialized:
if (self.useMap) and (self.mapLowResFactor is not None):
self.model.set_registration_parameters(p, self.lowResSize, self.lowResSpacing)
else:
self.model.set_registration_parameters(p, self.sz, self.spacing)
self.delayed_model_parameters_still_to_be_set = False
else:
self.delayed_model_parameters_still_to_be_set = True
self.delayed_model_parameters = p
def _is_vector(self,d):
sz = d.size()
if len(sz)==1:
return True
else:
return False
def _is_tensor(self,d):
sz = d.size()
if len(sz)>1:
return True
else:
return False
def _aux_do_weight_clipping_norm(self,pars,desired_norm):
"""does weight clipping but only for conv or bias layers (assuming they are named as such); be careful with the namimg here"""
if self.weight_clipping_value > 0:
for key in pars:
# only do the clipping if it is a conv layer or a bias term
if key.lower().find('conv')>0 or key.lower().find('bias')>0:
p = pars[key]
if self._is_vector(p.data):
# just normalize this vector component-by-component, norm does not matter here as these are only scalars
p.data = p.data.clamp_(-self.weight_clipping_value, self.weight_clipping_value)
elif self._is_tensor(p.data):
# normalize sample-by-sample individually
for b in range(p.data.size()[0]):
param_norm = p.data[b, ...].norm(desired_norm)
if param_norm > self.weight_clipping_value:
clip_coef = self.weight_clipping_value / param_norm
p.data[b, ...].mul_(clip_coef)
else:
raise ValueError('Unknown data type; I do not know how to clip this')
def _do_shared_weight_clipping_pre_lsm(self):
multi_gaussian_weights = self.params['model']['registration_model']['forward_model']['smoother'][('multi_gaussian_weights', -1, 'the used multi gaussian weights')]
if multi_gaussian_weights==-1:
raise ValueError('The multi-gaussian weights should have been set before')
multi_gaussian_weights = np.array(multi_gaussian_weights)
sp = self.get_shared_model_parameters()
for key in sp:
if key.lower().find('pre_lsm_weights') > 0:
p = sp[key]
sz = p.size() #0 dim is weight dimension
if sz[0]!=len(multi_gaussian_weights):
                    raise ValueError('Number of multi-Gaussian weights needs to be {}, but got {}'.format(len(multi_gaussian_weights),sz[0]))
for w in range(sz[0]):
# this is to assure that the weights are always between 0 and 1 (when using the WeightedLinearSoftmax
p[w,...].data.clamp_(0.0-multi_gaussian_weights[w],1.0-multi_gaussian_weights[w])
def _do_individual_weight_clipping_l1(self):
ip = self.get_individual_model_parameters()
self._aux_do_weight_clipping_norm(pars=ip,desired_norm=1)
def _do_shared_weight_clipping_l1(self):
sp = self.get_shared_model_parameters()
self._aux_do_weight_clipping_norm(pars=sp,desired_norm=1)
def _do_individual_weight_clipping_l2(self):
ip = self.get_individual_model_parameters()
self._aux_do_weight_clipping_norm(pars=ip, desired_norm=2)
def _do_shared_weight_clipping_l2(self):
sp = self.get_shared_model_parameters()
self._aux_do_weight_clipping_norm(pars=sp, desired_norm=2)
def _do_weight_clipping(self):
"""performs weight clipping, if desired"""
if self.weight_clipping_type is not None:
possible_modes = ['l1', 'l2', 'l1_individual', 'l2_individual', 'l1_shared', 'l2_shared', 'pre_lsm_weights']
if self.weight_clipping_type in possible_modes:
if self.weight_clipping_type=='l1':
self._do_shared_weight_clipping_l1()
self._do_individual_weight_clipping_l1()
elif self.weight_clipping_type=='l2':
self._do_shared_weight_clipping_l2()
self._do_individual_weight_clipping_l2()
elif self.weight_clipping_type=='l1_individual':
self._do_individual_weight_clipping_l1()
elif self.weight_clipping_type=='l2_individual':
self._do_individual_weight_clipping_l2()
elif self.weight_clipping_type=='l1_shared':
self._do_shared_weight_clipping_l1()
elif self.weight_clipping_type=='l2_shared':
self._do_shared_weight_clipping_l2()
elif self.weight_clipping_type=='pre_lsm_weights':
self._do_shared_weight_clipping_pre_lsm()
else:
raise ValueError('Illegal weight clipping type: {}'.format(self.weight_clipping_type))
else:
                raise ValueError('Weight clipping needs to be: [None|l1|l2|l1_individual|l2_individual|l1_shared|l2_shared|pre_lsm_weights]')
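    # Example (sketch): weight clipping is selected through the optimizer settings, e.g.
    #   params['optimizer']['weight_clipping_type'] = 'l2_shared'
    #   params['optimizer']['weight_clipping_value'] = 1.0
    # which restricts the l2 norm of each shared conv/bias parameter to at most 1.0.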
def get_model_parameters(self):
"""
Returns the parameters of the model
:return: model parameters
"""
return self.model.get_registration_parameters()
def set_shared_model_parameters(self,p):
"""
Set only the shared parameters of the model
:param p: shared registration parameters as an ordered dict
:return: n/a
"""
self.model.set_shared_registration_parameters(p)
def get_shared_model_parameters_and_buffers(self):
"""
Returns only the model parameters that are shared between models and the shared buffers associated w/ it.
:return: shared model parameters and buffers
"""
return self.model.get_shared_registration_parameters_and_buffers()
def get_shared_model_parameters(self):
"""
Returns only the model parameters that are shared between models.
:return: shared model parameters
"""
return self.model.get_shared_registration_parameters()
def set_individual_model_parameters(self,p):
"""
Set only the individual parameters of the model
:param p: individual registration parameters as an ordered dict
:return: n/a
"""
self.model.set_individual_registration_parameters(p)
def get_individual_model_parameters(self):
"""
        Returns only the model parameters that are individual to a model (i.e., not shared).
:return: individual model parameters
"""
return self.model.get_individual_registration_parameters()
def _collect_individual_or_shared_parameters_in_list(self,pars):
pl = []
for p_key in pars:
pl.append(pars[p_key])
return pl
def load_shared_state_dict(self,sd):
"""
Loads the shared part of a state dictionary
:param sd: shared state dictionary
:return: n/a
"""
self.model.load_shared_state_dict(sd)
def shared_state_dict(self):
"""
Returns the shared part of a state dictionary
:return:
"""
return self.model.shared_state_dict()
def load_individual_state_dict(self):
raise ValueError('Not yet implemented')
def individual_state_dict(self):
raise ValueError('Not yet implemented')
def upsample_model_parameters(self, desiredSize):
"""
Upsamples the model parameters
:param desiredSize: desired size after upsampling, e.g., [100,20,50]
:return: returns a tuple (upsampled_parameters,upsampled_spacing)
"""
return self.model.upsample_registration_parameters(desiredSize)
def downsample_model_parameters(self, desiredSize):
"""
Downsamples the model parameters
:param desiredSize: desired size after downsampling, e.g., [50,50,40]
:return: returns a tuple (downsampled_parameters,downsampled_spacing)
"""
return self.model.downsample_registration_parameters(desiredSize)
def _set_number_of_iterations_from_multi_scale(self, nrIter):
"""
        Same as set_number_of_iterations with the exception that this is not recorded in the parameter structure since it comes from the multi-scale setting
:param nrIter: number of iterations
"""
self.nrOfIterations = nrIter
def set_number_of_iterations(self, nrIter):
"""
Set the number of iterations of the optimizer
:param nrIter: number of iterations
"""
self.params['optimizer'][('single_scale', {}, 'single scale settings')]
self.params['optimizer']['single_scale']['nr_of_iterations'] = (nrIter, 'number of iterations')
self.nrOfIterations = nrIter
def get_number_of_iterations(self):
"""
Returns the number of iterations of the solver
:return: number of set iterations
"""
return self.nrOfIterations
def _closure(self):
self.optimizer_instance.zero_grad()
# 1) Forward pass: Compute predicted y by passing x to the model
# 2) Compute loss
# first define variables that will be passed to the model and the criterion (for further use)
over_scale_iter_count = self.iter_count if self.over_scale_iter_count is None else self.over_scale_iter_count + self.iter_count
opt_variables = {'iter': self.iter_count, 'epoch': self.current_epoch, 'scale': self.n_scale,
'over_scale_iter_count': over_scale_iter_count}
self.rec_IWarped, self.rec_phiWarped, self.rec_phiInverseWarped = model_evaluation.evaluate_model_low_level_interface(
model=self.model,
I_source=self.ISource,
opt_variables=opt_variables,
use_map=self.useMap,
initial_map=self.initialMap,
compute_inverse_map=self.compute_inverse_map,
initial_inverse_map=self.initialInverseMap,
map_low_res_factor=self.mapLowResFactor,
sampler=self.sampler,
low_res_spacing=self.lowResSpacing,
spline_order=self.spline_order,
low_res_I_source=self.lowResISource,
low_res_initial_map=self.lowResInitialMap,
low_res_initial_inverse_map=self.lowResInitialInverseMap,
compute_similarity_measure_at_low_res=self.compute_similarity_measure_at_low_res)
# compute the respective losses
if self.useMap:
if self.mapLowResFactor is not None and self.compute_similarity_measure_at_low_res:
loss_overall_energy, sim_energy, reg_energy = self.criterion(self.lowResInitialMap, self.rec_phiWarped,
self.lowResISource, self.lowResITarget,
self.lowResISource,
self.model.get_variables_to_transfer_to_loss_function(),
opt_variables)
else:
loss_overall_energy,sim_energy,reg_energy = self.criterion(self.initialMap, self.rec_phiWarped, self.ISource, self.ITarget, self.lowResISource,
self.model.get_variables_to_transfer_to_loss_function(),
opt_variables)
else:
loss_overall_energy,sim_energy,reg_energy = self.criterion(self.rec_IWarped, self.ISource, self.ITarget,
self.model.get_variables_to_transfer_to_loss_function(),
opt_variables )
# to support consensus optimization we have the option of adding a penalty term
# based on shared parameters
opt_par_loss_energy = self.compute_optimizer_parameter_loss(self.model.get_shared_registration_parameters())
loss_overall_energy = loss_overall_energy + opt_par_loss_energy
loss_overall_energy.backward()
# do gradient clipping
if self.clip_individual_gradient:
current_individual_grad_norm = torch.nn.utils.clip_grad_norm_(
self._collect_individual_or_shared_parameters_in_list(self.get_individual_model_parameters()),
self.clip_individual_gradient_value)
if self.clip_display:
if current_individual_grad_norm>self.clip_individual_gradient_value:
print('INFO: Individual gradient was clipped: {} -> {}'.format(current_individual_grad_norm,self.clip_individual_gradient_value))
if self.clip_shared_gradient:
current_shared_grad_norm = torch.nn.utils.clip_grad_norm_(
self._collect_individual_or_shared_parameters_in_list(self.get_shared_model_parameters()),
self.clip_shared_gradient_value)
if self.clip_display:
if current_shared_grad_norm > self.clip_shared_gradient_value:
print('INFO: Shared gradient was clipped: {} -> {}'.format(current_shared_grad_norm,
self.clip_shared_gradient_value))
self.rec_custom_optimizer_output_string = self.model.get_custom_optimizer_output_string()
self.rec_custom_optimizer_output_values = self.model.get_custom_optimizer_output_values()
self.rec_energy = loss_overall_energy
self.rec_similarityEnergy = sim_energy
self.rec_regEnergy = reg_energy
self.rec_opt_par_loss_energy = opt_par_loss_energy
# if self.useMap:
#
# if self.iter_count % 1 == 0:
# self.rec_energy, self.rec_similarityEnergy, self.rec_regEnergy = self.criterion.get_energy(
# self.identityMap, self.rec_phiWarped, self.ISource, self.ITarget, self.lowResISource, self.model.get_variables_to_transfer_to_loss_function())
# else:
# if self.iter_count % 1 == 0:
# self.rec_energy, self.rec_similarityEnergy, self.rec_regEnergy = self.criterion.get_energy(
# self.rec_IWarped, self.ISource, self.ITarget, self.model.get_variables_to_transfer_to_loss_function())
return loss_overall_energy
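# Note on the closure pattern used above: PyTorch optimizers that may re-evaluate the
# objective several times per step (e.g., LBFGS) accept a closure that recomputes the loss
# and its gradients. A minimal sketch of how such a closure is consumed (names are illustrative):
#   loss = optimizer_instance.step(closure)  # closure() zeroes the gradients, runs forward/backward, returns the loss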
def analysis(self, energy, similarityEnergy, regEnergy, opt_par_energy, phi_or_warped_image, custom_optimizer_output_string ='', custom_optimizer_output_values=None, force_visualization=False):
"""
Prints out and visualizes the current result.
:param energy: current total energy
:param similarityEnergy: current similarity energy
:param regEnergy: current regularization energy
:param opt_par_energy: energy of the additional optimizer parameter loss (e.g., a consensus penalty)
:param phi_or_warped_image: deformation map (if useMap is True), otherwise the warped image
:param custom_optimizer_output_string: additional output string provided by the model
:param custom_optimizer_output_values: additional output values provided by the model
:param force_visualization: if True, the result is visualized regardless of the visualization interval
:return: returns tuple: first entry True if termination tolerance was reached, otherwise False; second entry True if the image was visualized
"""
current_batch_size = phi_or_warped_image.size()[0]
was_visualized = False
reached_tolerance = False
cur_energy = utils.t2np(energy.float())
# energy analysis
self._add_to_history('iter', self.iter_count)
self._add_to_history('energy', cur_energy[0])
self._add_to_history('similarity_energy', utils.t2np(similarityEnergy.float()))
self._add_to_history('regularization_energy', utils.t2np(regEnergy.float()))
self._add_to_history('opt_par_energy', utils.t2np(opt_par_energy.float())[0])
if custom_optimizer_output_values is not None:
for key in custom_optimizer_output_values:
self._add_to_history(key,custom_optimizer_output_values[key])
if self.last_energy is not None:
# relative function tolerance: |f(x_i)-f(x_{i+1})| / (1+|f(x_{i+1})|)
self.rel_f = abs(self.last_energy - cur_energy) / (1 + abs(cur_energy))
self._add_to_history('relF', self.rel_f[0])
if self.show_iteration_output:
cprint('{iter:5d}-Tot: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} | optParE={optParE:08.4f} | relF={relF:08.4f} | {cos}'
.format(iter=self.iter_count,
energy=utils.get_scalar(cur_energy),
similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float())),
regE=utils.get_scalar(utils.t2np(regEnergy.float())),
optParE=utils.get_scalar(utils.t2np(opt_par_energy.float())),
relF=utils.get_scalar(self.rel_f),
cos=custom_optimizer_output_string), 'red')
cprint('{iter:5d}-Img: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} |'
.format(iter=self.iter_count,
energy=utils.get_scalar(cur_energy) / current_batch_size,
similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float())) / current_batch_size,
regE=utils.get_scalar(utils.t2np(regEnergy.float())) / current_batch_size), 'blue')
# check if relative convergence tolerance is reached
if self.rel_f < self.rel_ftol:
if self.show_iteration_output:
print('Reached relative function tolerance of = ' + str(self.rel_ftol))
reached_tolerance = True
else:
self._add_to_history('relF', None)
if self.show_iteration_output:
cprint('{iter:5d}-Tot: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} | optParE={optParE:08.4f} | relF= n/a | {cos}'
.format(iter=self.iter_count,
energy=utils.get_scalar(cur_energy),
similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float())),
regE=utils.get_scalar(utils.t2np(regEnergy.float())),
optParE=utils.get_scalar(utils.t2np(opt_par_energy.float())),
cos=custom_optimizer_output_string), 'red')
cprint('{iter:5d}-Img: E={energy:08.4f} | simE={similarityE:08.4f} | regE={regE:08.4f} |'
.format(iter=self.iter_count,
energy=utils.get_scalar(cur_energy)/current_batch_size,
similarityE=utils.get_scalar(utils.t2np(similarityEnergy.float()))/current_batch_size,
regE=utils.get_scalar(utils.t2np(regEnergy.float()))/current_batch_size),'blue')
iter_count = self.iter_count
self.last_energy = cur_energy
if self.recording_step is not None:
if iter_count % self.recording_step == 0 or iter_count == 0:
if self.useMap:
if self.compute_similarity_measure_at_low_res:
I1Warped = utils.compute_warped_image_multiNC(self.lowResISource,
phi_or_warped_image,
self.lowResSpacing,
self.spline_order,
zero_boundary=False)
lowResLWarped = utils.get_warped_label_map(self.lowResLSource,
phi_or_warped_image,
self.spacing)
self.history['recording'].append({
'iter': iter_count,
'iS': utils.t2np(self.ISource),
'iT': utils.t2np(self.ITarget),
'iW': utils.t2np(I1Warped),
'iSL': utils.t2np(self.lowResLSource) if self.lowResLSource is not None else None,
'iTL': utils.t2np(self.lowResLTarget) if self.lowResLTarget is not None else None,
'iWL': utils.t2np(lowResLWarped) if lowResLWarped is not None else None,
'phiWarped': utils.t2np(phi_or_warped_image)
})
else:
I1Warped = utils.compute_warped_image_multiNC(self.ISource,
phi_or_warped_image,
self.spacing,
self.spline_order,
zero_boundary=False)
LWarped = None
if self.LSource is not None and self.LTarget is not None:
LWarped = utils.get_warped_label_map(self.LSource,
phi_or_warped_image,
self.spacing)
self.history['recording'].append({
'iter': iter_count,
'iS': utils.t2np(self.ISource),
'iT': utils.t2np(self.ITarget),
'iW': utils.t2np(I1Warped),
'iSL': utils.t2np(self.LSource) if self.LSource is not None else None,
'iTL': utils.t2np(self.LTarget) if self.LTarget is not None else None,
'iWL': utils.t2np(LWarped) if LWarped is not None else None,
'phiWarped': utils.t2np(phi_or_warped_image)
})
else:
self.history['recording'].append({
'iter': iter_count,
'iS': utils.t2np(self.ISource),
'iT': utils.t2np(self.ITarget),
'iW': utils.t2np(phi_or_warped_image)
})
if self.visualize or self.save_fig:
visual_param = {}
visual_param['visualize'] = self.visualize
visual_param['save_fig'] = self.save_fig
visual_param['save_fig_num'] = self.save_fig_num
if self.save_fig:
visual_param['save_fig_path'] = self.save_fig_path
visual_param['save_fig_path_byname'] = os.path.join(self.save_fig_path, 'byname')
visual_param['save_fig_path_byiter'] = os.path.join(self.save_fig_path, 'byiter')
visual_param['pair_name'] = self.pair_name
visual_param['iter'] = 'scale_'+str(self.n_scale) + '_iter_' + str(self.iter_count)
if self.visualize_step and (iter_count % self.visualize_step == 0) or (iter_count == self.nrOfIterations-1) or force_visualization:
was_visualized = True
if self.useMap and self.mapLowResFactor is not None:
vizImage, vizName = self.model.get_parameter_image_and_name_to_visualize(self.lowResISource)
else:
vizImage, vizName = self.model.get_parameter_image_and_name_to_visualize(self.ISource)
if self.useMap:
if self.compute_similarity_measure_at_low_res:
I1Warped = utils.compute_warped_image_multiNC(self.lowResISource,
phi_or_warped_image,
self.lowResSpacing,
self.spline_order,
zero_boundary=False)
lowResLWarped = utils.get_warped_label_map(self.lowResLSource,
phi_or_warped_image,
self.spacing)
vizReg.show_current_images(iter=iter_count,
iS=self.lowResISource,
iT=self.lowResITarget,
iW=I1Warped,
iSL=self.lowResLSource,
iTL=self.lowResLTarget,
iWL=lowResLWarped,
vizImages=vizImage,
vizName=vizName,
phiWarped=phi_or_warped_image,
visual_param=visual_param)
else:
I1Warped = utils.compute_warped_image_multiNC(self.ISource,
phi_or_warped_image,
self.spacing,
self.spline_order,
zero_boundary=False)
vizImage = vizImage if len(vizImage)>2 else None
LWarped = None
if self.LSource is not None and self.LTarget is not None:
LWarped = utils.get_warped_label_map(self.LSource,
phi_or_warped_image,
self.spacing)
vizReg.show_current_images(iter=iter_count,
iS=self.ISource,
iT=self.ITarget,
iW=I1Warped,
iSL=self.LSource,
iTL=self.LTarget,
iWL=LWarped,
vizImages=vizImage,
vizName=vizName,
phiWarped=phi_or_warped_image,
visual_param=visual_param)
else:
vizReg.show_current_images(iter=iter_count,
iS=self.ISource,
iT=self.ITarget,
iW=phi_or_warped_image,
vizImages=vizImage,
vizName=vizName,
phiWarped=None,
visual_param=visual_param)
return reached_tolerance, was_visualized
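# Worked example for the relative function tolerance used above: with a previous energy of
# 10.0 and a current energy of 9.99, rel_f = |10.0 - 9.99| / (1 + |9.99|) ~= 9.1e-4, so the
# iterations terminate once this value falls below rel_ftol.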
def _debugging_saving_intermid_img(self,img=None,is_label_map=False, append=''):
folder_path = os.path.join(self.save_fig_path,'debugging')
folder_path = os.path.join(folder_path, self.pair_name[0])
make_dir(folder_path)
file_name = 'scale_'+str(self.n_scale) + '_iter_' + str(self.iter_count)+append
file_name=file_name.replace('.','_')
if is_label_map:
file_name += '_label'
path = os.path.join(folder_path,file_name+'.nii.gz')
im_io = FIO.ImageIO()
im_io.write(path, np.squeeze(img.detach().cpu().numpy()))
# todo: write these parameter/optimizer functions also for shared parameters and all parameters
def set_sgd_shared_model_parameters_and_optimizer_states(self, pars):
"""
Set the shared model parameters and the states that may be stored by the optimizer, such as the momentum.
Expects a list (or a single dictionary) of entries with the keys 'name', 'is_shared', 'model_params' and,
optionally, 'momentum_buffer'; only entries flagged as shared are used.
NOTE: currently only supports SGD
:param pars: parameter list in the format described above
:return: n/a
"""
if self.optimizer_instance is None:
raise ValueError('Optimizer not yet created')
if (self._sgd_par_list is None) or (self._sgd_par_names is None):
raise ValueError(
'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')
if len(pars) == 0:
print('WARNING: found no values')
return
# the optimizer (if properly initialized) already holds pointers to the model parameters and the optimizer states
# so we can set everything in one swoop here
# loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)
# this input will represent a sample from a pytorch dataloader
# wrap the parameters in a list if needed (so we can mirror the setup from get_sgd_...
if type(pars) == list:
use_pars = pars
else:
use_pars = [pars]
for p in use_pars:
if 'is_shared' in p:
if p['is_shared']:
current_name = p['name']
assert (torch.is_tensor(p['model_params']))
current_model_params = p['model_params']
if 'momentum_buffer' in p:
assert (torch.is_tensor(p['momentum_buffer']))
current_momentum_buffer = p['momentum_buffer']
else:
current_momentum_buffer = None
# now we need to match this with the parameters and the state of the SGD optimizer
model_par = self._sgd_name_to_model_par[current_name]
model_par.data.copy_(current_model_params)
# and now do the same with the state
param_state = self.optimizer_instance.state[model_par]
if 'momentum_buffer' in param_state:
param_state['momentum_buffer'].copy_(current_momentum_buffer)
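# Sketch of the per-parameter entry format this setter expects (inferred from the keys read
# above; field values are illustrative):
#   {'name': 'some_shared_parameter', 'is_shared': True,
#    'model_params': <tensor>, 'momentum_buffer': <tensor, optional>}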
def set_sgd_individual_model_parameters_and_optimizer_states(self, pars):
"""
Set the individual model parameters and states that may be stored by the optimizer such as the momentum.
Expects as input what get_sgd_individual_model_parameters_and_optimizer_states creates as output,
but potentially multiple copies of it (as generated by a pyTorch dataloader). I.e., it takes in a dataloader sample.
NOTE: currently only supports SGD
:param pars: parameter list as produced by get_sgd_individual_model_parameters_and_optimizer_states
:return: n/a
"""
if self.optimizer_instance is None:
raise ValueError('Optimizer not yet created')
if (self._sgd_par_list is None) or (self._sgd_par_names is None):
raise ValueError(
'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')
if len(pars) == 0:
print('WARNING: found no values')
return
# the optimizer (if properly initialized) already holds pointers to the model parameters and the optimizer states
# so we can set everything in one swoop here
# loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)
# this input will represent a sample from a pytorch dataloader
# wrap the parameters in a list if needed (so we can mirror the setup from get_sgd_...
if type(pars)==list:
use_pars = pars
else:
use_pars = [pars]
for p in use_pars:
if 'is_shared' in p:
if not p['is_shared'][0]: # need to grab the first one, because the dataloader replicated these entries
current_name = p['name'][0]
assert( torch.is_tensor(p['model_params']))
current_model_params = p['model_params']
if 'momentum_buffer' in p:
assert( torch.is_tensor(p['momentum_buffer']) )
current_momentum_buffer = p['momentum_buffer']
else:
current_momentum_buffer = None
# now we need to match this with the parameters and the state of the SGD optimizer
model_par = self._sgd_name_to_model_par[current_name]
model_par.data.copy_(current_model_params)
# and now do the same with the state
param_state = self.optimizer_instance.state[model_par]
if 'momentum_buffer' in param_state:
param_state['momentum_buffer'].copy_(current_momentum_buffer)
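# For the individual-parameter setter the entries typically come out of a pytorch dataloader,
# so scalar fields are collated per batch element; this is why 'is_shared' and 'name' are
# indexed with [0] above. Illustrative (collated) entry:
#   {'name': ['m'], 'is_shared': [False], 'model_params': <tensor with leading batch dimension>}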
def _convert_obj_with_parameters_to_obj_with_tensors(self, p):
"""
Converts structures that consist of lists and dictionaries with parameters to tensors
:param p: parameter structure
:return: object with parameters converted to tensors
"""
if type(p) == list:
ret_p = []
for e in p:
ret_p.append(self._convert_obj_with_parameters_to_obj_with_tensors(e))
return ret_p
elif type(p) == dict:
ret_p = dict()
for key in p:
ret_p[key] = self._convert_obj_with_parameters_to_obj_with_tensors((p[key]))
return ret_p
elif type(p) == torch.nn.parameter.Parameter:
return p.data
else:
return p
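# Minimal illustration of the conversion above (hypothetical values):
#   {'m': torch.nn.Parameter(torch.zeros(3))}  ->  {'m': tensor([0., 0., 0.])}
# i.e., nn.Parameter objects are replaced by their underlying .data tensors, lists and
# dictionaries are traversed recursively, and anything else is passed through unchanged.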
def get_sgd_shared_model_parameters(self):
"""
Gets the model parameters that are shared.
:return: list of parameter groups (one dictionary per SGD parameter group) containing the shared model parameters
"""
if self.optimizer_instance is None:
raise ValueError('Optimizer not yet created')
if (self._sgd_par_list is None) or (self._sgd_par_names is None):
raise ValueError(
'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')
d = []
# loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)
for group in self.optimizer_instance.param_groups:
group_dict = dict()
group_dict['params'] = []
for p in group['params']:
current_group_params = dict()
# let's first see if this is a shared state
if self._sgd_par_names[p]['is_shared']:
# keep track of the names, so we can read them back in
current_group_params.update(self._sgd_par_names[p])
# now deal with the optimizer state if available
current_group_params['model_params'] = self._convert_obj_with_parameters_to_obj_with_tensors(p)
group_dict['params'].append(current_group_params)
d.append(group_dict)
return d
def get_sgd_individual_model_parameters_and_optimizer_states(self):
"""
Gets the individual model parameters and states that may be stored by the optimizer such as the momentum.
NOTE: currently only supports SGD
:return: list of parameter groups (one dictionary per SGD parameter group) with the individual model parameters and their optimizer state
"""
if self.optimizer_instance is None:
raise ValueError('Optimizer not yet created')
if (self._sgd_par_list is None) or (self._sgd_par_names is None):
raise ValueError(
'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')
d = []
# loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)
for group in self.optimizer_instance.param_groups:
group_dict = dict()
group_dict['weight_decay'] = group['weight_decay']
group_dict['momentum'] = group['momentum']
group_dict['dampening'] = group['dampening']
group_dict['nesterov'] = group['nesterov']
group_dict['lr'] = group['lr']
group_dict['params'] = []
for p in group['params']:
current_group_params = dict()
# let's first see if this is a shared state
if not self._sgd_par_names[p]['is_shared']:
# keep track of the names, so we can read them back in
current_group_params.update(self._sgd_par_names[p])
# now deal with the optimizer state if available
current_group_params['model_params'] = self._convert_obj_with_parameters_to_obj_with_tensors(p)
if group['momentum'] != 0:
param_state = self.optimizer_instance.state[p]
if 'momentum_buffer' in param_state:
current_group_params['momentum_buffer'] = self._convert_obj_with_parameters_to_obj_with_tensors(param_state['momentum_buffer'])
group_dict['params'].append(current_group_params)
d.append(group_dict)
return d
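# Shape of the returned structure (one dictionary per SGD parameter group, as assembled above):
#   [{'weight_decay': ..., 'momentum': ..., 'dampening': ..., 'nesterov': ..., 'lr': ...,
#     'params': [{'name': ..., 'is_shared': False, 'model_params': <tensor>,
#                 'momentum_buffer': <tensor, only present if momentum != 0>}, ...]}, ...]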
def _remove_state_variables_for_individual_parameters(self,individual_pars):
"""
Removes the optimizer state for individual parameters.
This is required at the beginning, as we do not want to reuse, for example, the SGD momentum for an unrelated registration.
:param individual_pars: individual parameters as returned by get_sgd_individual_model_parameters_and_optimizer_states
:return: n/a
"""
if self.optimizer_instance is None:
raise ValueError('Optimizer not yet created')
if (self._sgd_par_list is None) or (self._sgd_par_names is None):
raise ValueError(
'sgd par list and/or par names not available; needs to be created before passing it to the optimizer')
# loop over the SGD parameter groups (this is modeled after the code in the SGD optimizer)
for group in self.optimizer_instance.param_groups:
for p in group['params']:
# let's first see if this is a shared state
if not self._sgd_par_names[p]['is_shared']:
# we want to delete the state of this one
self.optimizer_instance.state.pop(p)
def _create_optimizer_parameter_dictionary(self,individual_pars, shared_pars,
settings_individual=dict(), settings_shared=dict()):
par_list = []
"""List of parameters that can directly be passed to an optimizer; different list elements define different parameter groups"""
par_names = dict()
"""dictionary which maps from a parameters id (i.e., memory) to its description: name/is_shared"""
# name is the name of the variable
# is_shared keeps track of if a parameter was declared shared (opposed to individual, which we need for registrations)
names_to_par = dict()
"""dictionary which maps from a parameter name back to the parameter"""
# first deal with the individual parameters
pl_ind, par_to_name_ind = utils.get_parameter_list_and_par_to_name_dict_from_parameter_dict(individual_pars)
#cd = {'params': pl_ind}
cd = {'params': [p for p in pl_ind if p.requires_grad]}
cd.update(settings_individual)
par_list.append(cd)
# add all the names
for current_par, key in zip(pl_ind, par_to_name_ind):
par_names[key] = {'name': par_to_name_ind[key], 'is_shared': False}
names_to_par[par_to_name_ind[key]] = current_par
# now deal with the shared parameters
pl_shared, par_to_name_shared = utils.get_parameter_list_and_par_to_name_dict_from_parameter_dict(shared_pars)
#cd = {'params': pl_shared}
cd = {'params': [p for p in pl_shared if p.requires_grad]}
cd.update(settings_shared)
par_list.append(cd)
for current_par, key in zip(pl_shared, par_to_name_shared):
par_names[key] = {'name': par_to_name_shared[key], 'is_shared': True}
names_to_par[par_to_name_shared[key]] = current_par
return par_list, par_names, names_to_par
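# The two parameter groups assembled above can be passed directly to torch.optim.SGD, which
# supports per-group hyperparameters. Minimal self-contained sketch (variable names are illustrative):
#   import torch
#   w_individual = torch.nn.Parameter(torch.zeros(2))
#   w_shared = torch.nn.Parameter(torch.zeros(2))
#   opt = torch.optim.SGD([{'params': [w_individual], 'lr': 0.01, 'momentum': 0.9},
#                          {'params': [w_shared], 'lr': 0.001, 'momentum': 0.9}])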
def _write_out_shared_parameters(self, model_pars, filename):
# just write out the ones that are shared
for group in model_pars:
if 'params' in group:
was_shared_group = False # there can only be one
# create lists that will hold the information for the different batches
cur_pars = []
# now iterate through the current parameter list
for p in group['params']:
needs_to_be_saved = True
if 'is_shared' in p:
if not p['is_shared']:
needs_to_be_saved = False
if needs_to_be_saved:
# we found a shared entry
was_shared_group = True
cur_pars.append(p)
# now we have the shared parameter list and we can write it out
if was_shared_group: # otherwise will be overwritten by a later parameter group
torch.save(cur_pars, filename)
def _write_out_individual_parameters(self, model_pars, filenames):
batch_size = len(filenames)
# just write out the ones that are individual
for group in model_pars:
if 'params' in group:
was_individual_group = False # there can only be one
# create lists that will hold the information for the different batches
for b in range(batch_size):
cur_pars = []
# now iterate through the current parameter list
for p in group['params']:
if 'is_shared' in p:
# we found an individual entry
if not p['is_shared']:
was_individual_group = True
# now go through this dictionary, extract the current batch info in it,
# and append it to the current batch parameter list
cur_dict = dict()
for p_el in p:
if p_el == 'name':
cur_dict['name'] = p[p_el]
elif p_el == 'is_shared':
cur_dict['is_shared'] = p[p_el]
else:
# this will be a tensor so we need to extract the information for the current batch
cur_dict[p_el] = p[p_el][b, ...]
cur_pars.append(cur_dict)
# now we have the parameter list for one of the elements of the batch and we can write it out
if was_individual_group: # otherwise will be overwritten by a later parameter group
torch.save(cur_pars, filenames[b])
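# Note: individual parameters are stored batched (the first dimension is the registration batch),
# which is why the per-sample slice p[p_el][b, ...] is extracted before writing each file via torch.save.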
def _get_optimizer_instance(self):
if (self.model is None) or (self.criterion is None):
raise ValueError('Please specify a model to solve with set_model first')
# first check if an optimizer was specified externally
if self.optimizer is not None:
# simply instantiate it
if self.optimizer_name is not None:
print('Warning: optimizer name = ' + str(self.optimizer_name) +
' specified, but ignored since optimizer was set explicitly')
opt_instance = self.optimizer(self.model.parameters(), **self.optimizer_params)
return opt_instance
else:
# select it by name
# TODO: Check what the best way to adapt the tolerances is here; tying it to rel_ftol is not really correct
if self.optimizer_name is None:
raise ValueError('Need to select an optimizer')
elif self.optimizer_name == 'lbfgs_ls':
if self.last_successful_step_size_taken is not None:
desired_lr = self.last_successful_step_size_taken
else:
desired_lr = 1.0
max_iter = self.params['optimizer']['lbfgs'][('max_iter',1,'maximum number of iterations')]
max_eval = self.params['optimizer']['lbfgs'][('max_eval',5,'maximum number of evaluation')]
history_size = self.params['optimizer']['lbfgs'][('history_size',5,'Size of the optimizer history')]
line_search_fn = self.params['optimizer']['lbfgs'][('line_search_fn','backtracking','Type of line search function')]
opt_instance = CO.LBFGS_LS(self.model.parameters(),
lr=desired_lr, max_iter=max_iter, max_eval=max_eval,
tolerance_grad=self.rel_ftol * 10, tolerance_change=self.rel_ftol,
history_size=history_size, line_search_fn=line_search_fn)
return opt_instance
elif self.optimizer_name == 'sgd':
#if self.last_successful_step_size_taken is not None:
# desired_lr = self.last_successful_step_size_taken
#else:
if self.default_learning_rate is not None:
current_default_learning_rate = self.default_learning_rate
self.params['optimizer']['sgd']['individual']['lr'] = current_default_learning_rate
self.params['optimizer']['sgd']['shared']['lr'] = current_default_learning_rate
else:
current_default_learning_rate = 0.01
desired_lr_individual = self.params['optimizer']['sgd']['individual'][('lr',current_default_learning_rate,'desired learning rate')]
sgd_momentum_individual = self.params['optimizer']['sgd']['individual'][('momentum',0.9,'sgd momentum')]
sgd_dampening_individual = self.params['optimizer']['sgd']['individual'][('dampening',0.0,'sgd dampening')]
sgd_weight_decay_individual = self.params['optimizer']['sgd']['individual'][('weight_decay',0.0,'sgd weight decay')]
sgd_nesterov_individual = self.params['optimizer']['sgd']['individual'][('nesterov',True,'use Nesterov scheme')]
desired_lr_shared = self.params['optimizer']['sgd']['shared'][('lr', current_default_learning_rate, 'desired learning rate')]
sgd_momentum_shared = self.params['optimizer']['sgd']['shared'][('momentum', 0.9, 'sgd momentum')]
sgd_dampening_shared = self.params['optimizer']['sgd']['shared'][('dampening', 0.0, 'sgd dampening')]
sgd_weight_decay_shared = self.params['optimizer']['sgd']['shared'][('weight_decay', 0.0, 'sgd weight decay')]
sgd_nesterov_shared = self.params['optimizer']['sgd']['shared'][('nesterov', True, 'use Nesterov scheme')]
settings_shared = {'momentum': sgd_momentum_shared,
'dampening': sgd_dampening_shared,
'weight_decay': sgd_weight_decay_shared,
'nesterov': sgd_nesterov_shared,
'lr': desired_lr_shared}
settings_individual = {'momentum': sgd_momentum_individual,
'dampening': sgd_dampening_individual,
'weight_decay': sgd_weight_decay_individual,
'nesterov': sgd_nesterov_individual,
'lr': desired_lr_individual}
self._sgd_par_list, self._sgd_par_names, self._sgd_name_to_model_par = self._create_optimizer_parameter_dictionary(
self.model.get_individual_registration_parameters(),
self.model.get_shared_registration_parameters(),
settings_individual=settings_individual,
settings_shared=settings_shared)
opt_instance = torch.optim.SGD(self._sgd_par_list)
return opt_instance
elif self.optimizer_name == 'adam':
if self.last_successful_step_size_taken is not None:
desired_lr = self.last_successful_step_size_taken
else:
if self.default_learning_rate is not None:
current_default_learning_rate = self.default_learning_rate
self.params['optimizer']['adam']['lr'] = current_default_learning_rate
else:
current_default_learning_rate = 0.01
desired_lr = self.params['optimizer']['adam'][('lr',current_default_learning_rate,'desired learning rate')]
adam_betas = self.params['optimizer']['adam'][('betas',[0.9,0.999],'adam betas')]
adam_eps = self.params['optimizer']['adam'][('eps',self.rel_ftol,'adam eps')]
adam_weight_decay = self.params['optimizer']['adam'][('weight_decay',0.0,'adam weight decay')]
opt_instance = torch.optim.Adam(self.model.parameters(), lr=desired_lr,
betas=adam_betas,
eps=adam_eps,
weight_decay=adam_weight_decay)
return opt_instance
else:
raise ValueError('Optimizer = ' + str(self.optimizer_name) + ' not yet supported')
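# Illustrative (non-authoritative) sketch of the corresponding 'optimizer' section of a settings
# dictionary/json file, based on the keys read above; the values shown are just the defaults used here:
#   {'optimizer': {'name': 'sgd',
#                  'sgd': {'individual': {'lr': 0.01, 'momentum': 0.9, 'dampening': 0.0, 'weight_decay': 0.0, 'nesterov': True},
#                          'shared': {'lr': 0.01, 'momentum': 0.9, 'dampening': 0.0, 'weight_decay': 0.0, 'nesterov': True}},
#                  'adam': {'lr': 0.01, 'betas': [0.9, 0.999], 'weight_decay': 0.0},
#                  'lbfgs': {'max_iter': 1, 'max_eval': 5, 'history_size': 5, 'line_search_fn': 'backtracking'}}}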
def _set_all_still_missing_parameters(self):
if self.optimizer_name is None:
self.optimizer_name = self.params['optimizer'][('name','lbfgs_ls','Optimizer (lbfgs|adam|sgd)')]
if self.model is None:
model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map', "['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'")]
self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False' )]
self.set_model( model_name )
if self.nrOfIterations is None: # not externally set, so this will not be a multi-scale solution
self.params['optimizer'][('single_scale', {}, 'single scale settings')]
self.nrOfIterations = self.params['optimizer']['single_scale'][('nr_of_iterations', 10, 'number of iterations')]
# get the optimizer
if self.optimizer_instance is None:
self.optimizer_instance = self._get_optimizer_instance()
if USE_CUDA:
self.model = self.model.cuda()
self.compute_low_res_image_if_needed()
self.optimizer_has_been_initialized = True
def set_scheduler_patience(self,patience):
self.params['optimizer']['scheduler']['patience'] = patience
self.scheduler_patience = patience
def set_scheduler_patience_silent(self,patience):
self.scheduler_patience = patience
def get_scheduler_patience(self):
return self.scheduler_patience
def _set_use_external_scheduler(self):
self._use_external_scheduler = True
def _set_use_internal_scheduler(self):
self._use_external_scheduler = False
def _get_use_external_scheduler(self):
return self._use_external_scheduler
def _get_dictionary_to_pass_to_integrator(self):
"""
This is experimental to allow passing additional parameters to integrators/smoothers, etc.
:return: dictionary
"""
d = dict()
if self.mapLowResFactor is not None:
d['I0'] = self.lowResISource
d['I1'] = self.lowResITarget
else:
d['I0'] = self.ISource
d['I1'] = self.ITarget
return d
def optimize(self):
"""
Do the single scale optimization
"""
self._set_all_still_missing_parameters()
# in this way model parameters can be "set" before the optimizer has been properly initialized
if self.delayed_model_parameters_still_to_be_set:
print('Setting model parameters, delayed')
self.set_model_parameters(self.delayed_model_parameters)
if self.delayed_model_state_dict_still_to_be_set:
print('Setting model state dict, delayed')
self.set_model_state_dict(self.delayed_model_state_dict)
# this allows passing additional parameters to the integrators and smoothers for all models
self.model.set_dictionary_to_pass_to_integrator(self._get_dictionary_to_pass_to_integrator())
self.criterion.set_dictionary_to_pass_to_smoother(self._get_dictionary_to_pass_to_integrator())
# optimize for a few steps
start = time.time()
self.last_energy = None
could_not_find_successful_step = False
if not self._use_external_scheduler:
self.use_step_size_scheduler = self.params['optimizer'][('use_step_size_scheduler',True,'If set to True the step sizes are reduced if no progress is made')]
if self.use_step_size_scheduler:
self.params['optimizer'][('scheduler', {}, 'parameters for the ReduceLROnPlateau scheduler')]
self.scheduler_verbose = self.params['optimizer']['scheduler'][
('verbose', True, 'if True prints out changes in learning rate')]
self.scheduler_factor = self.params['optimizer']['scheduler'][('factor', 0.5, 'reduction factor')]
self.scheduler_patience = self.params['optimizer']['scheduler'][
('patience', 10, 'how many steps without reduction before LR is changed')]
if self.use_step_size_scheduler and self.scheduler is None:
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer_instance, 'min',
verbose=self.scheduler_verbose,
factor=self.scheduler_factor,
patience=self.scheduler_patience)
self.iter_count = 0
for iter in range(self.nrOfIterations):
# take a step of the optimizer
# for p in self.optimizer_instance._params:
# p.data = p.data.float()
current_loss = self.optimizer_instance.step(self._closure)
# do weight clipping if it is desired
self._do_weight_clipping()
# an external scheduler may for example be used in batch optimization
if not self._use_external_scheduler:
if self.use_step_size_scheduler:
self.scheduler.step(current_loss.data[0])
if hasattr(self.optimizer_instance,'last_step_size_taken'):
self.last_successful_step_size_taken = self.optimizer_instance.last_step_size_taken()
if self.last_successful_step_size_taken==0.0:
print('Optimizer was not able to find a successful step. Stopping iterations.')
could_not_find_successful_step = True
if iter==0:
print('The gradient was likely too large or the optimization started from an optimal point.')
print('If this behavior is unexpected, try adjusting the settings of the similarity measure or allow the optimizer to try out smaller steps.')
# to make sure warped images and the map is correct, call closure once more
self._closure()
if self.useMap:
vis_arg = self.rec_phiWarped
else:
vis_arg = self.rec_IWarped
tolerance_reached, was_visualized = self.analysis(self.rec_energy, self.rec_similarityEnergy,
self.rec_regEnergy, self.rec_opt_par_loss_energy,
vis_arg,
self.rec_custom_optimizer_output_string,
self.rec_custom_optimizer_output_values)
if tolerance_reached or could_not_find_successful_step:
if tolerance_reached:
print('Terminating optimization, because the desired tolerance was reached.')
# force the output of the last image in this case, if it has not been visualized previously
if not was_visualized and (self.visualize or self.save_fig):
_, _ = self.analysis(self.rec_energy, self.rec_similarityEnergy,
self.rec_regEnergy, self.rec_opt_par_loss_energy,
vis_arg,
self.rec_custom_optimizer_output_string,
self.rec_custom_optimizer_output_values,
force_visualization=True)
break
self.iter_count = iter+1
if self.show_iteration_output:
cprint('-->Elapsed time {:.5f}[s]'.format(time.time() - start), 'green')
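# The step-size scheduler created in optimize() is the standard ReduceLROnPlateau scheduler;
# minimal stand-alone sketch of its use (assuming an existing optimizer `opt` and loss value `current_loss`):
#   scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, 'min', factor=0.5, patience=10)
#   scheduler.step(current_loss)  # reduces the learning rate when the loss stops improving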
class SingleScaleBatchRegistrationOptimizer(ImageRegistrationOptimizer):
def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):
super(SingleScaleBatchRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
self.params[('optimizer', {}, 'optimizer settings')]
cparams = self.params['optimizer']
cparams[('batch_settings', {}, 'settings for the batch optimizer')]
cparams = cparams['batch_settings']
self.batch_size = cparams[('batch_size',2,'how many images per batch (if set larger or equal to the number of images, it will be processed as one batch)')]
"""how many images per batch"""
self.shuffle = cparams[('shuffle', True, 'if batches should be shuffled between epochs')]
"""shuffle batches between epochshow many images per batch"""
self.num_workers = cparams[('num_workers',0,'Number of workers to read the data. Set it to zero on the GPU or use >0 at your own risk.')]
"""number of workers to read the data"""
self.nr_of_epochs = cparams[('nr_of_epochs', 1,'how many epochs')]
"""how many iterations for batch; i.e., how often to iterate over the entire dataset = epochs"""
self.parameter_output_dir = cparams[('parameter_output_dir','parameters','output directory to store the shared and the individual parameters during the iterations')]
"""output directory to store the shared and the individual parameters during the iterations"""
self.individual_parameter_output_dir = os.path.join(self.parameter_output_dir,'individual')
self.shared_parameter_output_dir = os.path.join(self.parameter_output_dir,'shared')
self.start_from_previously_saved_parameters = cparams[('start_from_previously_saved_parameters',True,'If set to True, checks if parameter files from a previous run exist in the output directories and uses them if they do.')]
"""If true then checks if previously saved parameter files exists and load them at the beginning already"""
self.individual_checkpoint_output_directory = os.path.join(self.individual_parameter_output_dir,'checkpoints')
self.shared_checkpoint_output_directory = os.path.join(self.shared_parameter_output_dir,'checkpoints')
self.checkpoint_interval = cparams[('checkpoint_interval',0,'after how many epochs, checkpoints are saved; if set to 0, checkpoint will not be saved')]
"""after how many epochs checkpoints are saved"""
self.verbose_output = cparams[('verbose_output',False,'turns on verbose output')]
self.show_sample_optimizer_output = cparams[('show_sample_optimizer_output',False,'If True, shows the energies during optimization of a sample')]
"""Shows iterations for each sample being optimized"""
self.also_eliminate_shared_state_between_samples_during_first_epoch = \
self.params['optimizer']['sgd'][('also_eliminate_shared_state_between_samples_during_first_epoch', False,
'if set to true all states are eliminated, otherwise only the individual ones')]
self.use_step_size_scheduler = self.params['optimizer'][('use_step_size_scheduler', True, 'If set to True the step sizes are reduced if no progress is made')]
self.scheduler = None
if self.use_step_size_scheduler:
self.params['optimizer'][('scheduler', {}, 'parameters for the ReduceLROnPlateau scheduler')]
self.scheduler_verbose = self.params['optimizer']['scheduler'][
('verbose', True, 'if True prints out changes in learning rate')]
self.scheduler_factor = self.params['optimizer']['scheduler'][('factor', 0.75, 'reduction factor')]
self.scheduler_patience = self.params['optimizer']['scheduler'][
('patience', 5, 'how many steps without reduction before LR is changed')]
self.model_name = None
self.add_model_name = None
self.add_model_networkClass = None
self.add_model_lossClass = None
self.addSimName = None
self.addSimMeasure = None
self.ssOpt = None
def write_parameters_to_settings(self):
if self.ssOpt is not None:
self.ssOpt.write_parameters_to_settings()
def add_similarity_measure(self, simName, simMeasure):
"""
Adds a custom similarity measure
:param simName: name of the similarity measure (string)
:param simMeasure: the similarity measure itself (an object that can be instantiated)
"""
self.addSimName = simName
self.addSimMeasure = simMeasure
def set_model(self, modelName):
"""
Sets the model that should be solved
:param modelName: name of the model that should be solved (string)
"""
self.model_name = modelName
def add_model(self, add_model_name, add_model_networkClass, add_model_lossClass):
"""
Adds a custom model to be optimized over
:param add_model_name: name of the model (string)
:param add_model_networkClass: network model itself (as an object that can be instantiated)
:param add_model_lossClass: loss of the model (as an object that can be instantiated)
"""
self.add_model_name = add_model_name
self.add_model_networkClass = add_model_networkClass
self.add_model_lossClass = add_model_lossClass
def get_checkpoint_dict(self):
d = super(SingleScaleBatchRegistrationOptimizer, self).get_checkpoint_dict()
if self.ssOpt is not None:
d['shared_parameters'] = self.ssOpt.get_shared_model_parameters_and_buffers()
return d
def load_checkpoint_dict(self, d, load_optimizer_state=False):
super(SingleScaleBatchRegistrationOptimizer, self).load_checkpoint_dict(d)
if 'shared_parameters' in d:
if self.ssOpt is not None:
self.ssOpt.set_shared_model_parameters(d['shared_parameters'])
else:
raise ValueError('checkpoint does not contain: shared_parameters')
def get_warped_image(self):
"""
Returns the warped image
:return: the warped image
"""
p = dict()
p['warped_images'] = []
print('get_warped_image: not yet implemented')
return p
def get_map(self):
"""
Returns the deformation map
:return: deformation map
"""
p = dict()
p['phi'] = []
print('get_map: not yet implemented')
return p
def get_inverse_map(self):
"""
Returns the inverse deformation map
:return: deformation map
"""
p = dict()
p['phi_inv'] = []
print('get_inverse_map: not yet implemented')
return p
def get_model_parameters(self):
"""
Returns the parameters of the model
:return: model parameters
"""
p = dict()
if self.ssOpt is not None:
p['shared_parameters'] = self.ssOpt.get_shared_model_parameters_and_buffers()
return p
def set_model_parameters(self, p):
raise ValueError('Setting model parameters not yet supported by batch optimizer')
def _set_all_still_missing_parameters(self):
if self.model_name is None:
model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map',
"['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'")]
self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False')]
self.set_model(model_name)
if self.optimizer_name is None:
self.optimizer_name = self.params['optimizer'][('name', 'sgd', 'Optimizer (lbfgs|adam|sgd)')]
self.optimizer_has_been_initialized = True
def _create_single_scale_optimizer(self,batch_size):
ssOpt = SingleScaleRegistrationOptimizer(batch_size, self.spacing, self.useMap, self.mapLowResFactor, self.params, compute_inverse_map=self.compute_inverse_map, default_learning_rate=self.default_learning_rate)
if ((self.add_model_name is not None) and
(self.add_model_networkClass is not None) and
(self.add_model_lossClass is not None)):
ssOpt.add_model(self.add_model_name, self.add_model_networkClass, self.add_model_lossClass)
# now set the actual model we want to solve
ssOpt.set_model(self.model_name)
if (self.addSimName is not None) and (self.addSimMeasure is not None):
ssOpt.add_similarity_measure(self.addSimName, self.addSimMeasure)
if self.optimizer_name is not None:
ssOpt.set_optimizer_by_name(self.optimizer_name)
else:
raise ValueError('Optimizers need to be specified by name for batch optimization at the moment.')
ssOpt.set_rel_ftol(self.get_rel_ftol())
ssOpt.set_visualization(self.get_visualization())
ssOpt.set_visualize_step(self.get_visualize_step())
return ssOpt
def _get_individual_checkpoint_filenames(self,output_directory,idx,epoch_iter):
filenames = []
for v in idx:
filenames.append(os.path.join(output_directory,'checkpoint_individual_parameter_pair_{:05d}_epoch_{:05d}.pt'.format(v,epoch_iter)))
return filenames
def _get_shared_checkpoint_filename(self,output_directory,epoch_iter):
filename = os.path.join(output_directory,'checkpoint_shared_parameters_epoch_{:05d}.pt'.format(epoch_iter))
return filename
def _create_all_output_directories(self):
if not os.path.exists(self.parameter_output_dir):
os.makedirs(self.parameter_output_dir)
print('Creating directory: ' + self.parameter_output_dir)
if not os.path.exists(self.individual_parameter_output_dir):
os.makedirs(self.individual_parameter_output_dir)
print('Creating directory: ' + self.individual_parameter_output_dir)
if not os.path.exists(self.shared_parameter_output_dir):
os.makedirs(self.shared_parameter_output_dir)
print('Creating directory: ' + self.shared_parameter_output_dir)
if not os.path.exists(self.individual_checkpoint_output_directory):
os.makedirs(self.individual_checkpoint_output_directory)
print('Creating directory: ' + self.individual_checkpoint_output_directory)
if not os.path.exists(self.shared_checkpoint_output_directory):
os.makedirs(self.shared_checkpoint_output_directory)
print('Creating directory: ' + self.shared_checkpoint_output_directory)
def _get_shared_parameter_filename(self,output_dir):
return os.path.join(output_dir,'shared_parameters.pt')
def optimize(self):
"""
The optimizer to optimize over batches of images
:return: n/a
"""
# todo: maybe switch loading and writing individual parameters to individual states; this would ensure that all states (such as running averages, etc.) are included and not only the parameters
if self.optimizer is not None:
raise ValueError('Custom optimizers are currently not supported for batch optimization.\
Set the optimizer by name (e.g., in the json configuration) instead. Should be some form of stochastic gradient descent.')
self._set_all_still_missing_parameters()
self._create_all_output_directories()
iter_offset = 0
if torch.is_tensor(self.ISource) or torch.is_tensor(self.ITarget):
raise ValueError('Batch optimizer expects lists of filenames as inputs for the source and target images')
registration_data_set = OD.PairwiseRegistrationDataset(output_directory=self.individual_parameter_output_dir,
source_image_filenames=self.ISource,
target_image_filenames=self.ITarget,
params=self.params)
nr_of_datasets = len(registration_data_set)
if nr_of_datasets<self.batch_size:
print('INFO: nr of datasets is smaller than batch-size. Reducing batch size to ' + str(nr_of_datasets))
self.batch_size=nr_of_datasets
if nr_of_datasets%self.batch_size!=0:
raise ValueError('nr_of_datasets = {}; batch_size = {}: Number of registration pairs needs to be divisible by the batch size.'.format(nr_of_datasets,self.batch_size))
dataloader = DataLoader(registration_data_set, batch_size=self.batch_size,
shuffle=self.shuffle, num_workers=self.num_workers)
self.ssOpt = None
last_batch_size = None
nr_of_samples = nr_of_datasets//self.batch_size
last_energy = None
last_sim_energy = None
last_reg_energy = None
last_opt_energy = None
shared_parameter_filename = self._get_shared_parameter_filename(self.shared_parameter_output_dir)
load_individual_parameters_during_first_epoch = False
load_shared_parameters_before_first_epoch = False
if self.start_from_previously_saved_parameters:
# check if there are files in the output_directory
has_all_filenames = True
for idx in range(len(self.ISource)):
cur_filename = registration_data_set._get_parameter_filename(idx)
if not os.path.isfile(cur_filename):
has_all_filenames = False
break
load_individual_parameters_during_first_epoch = has_all_filenames
load_shared_parameters_before_first_epoch = os.path.isfile(shared_parameter_filename)
if load_individual_parameters_during_first_epoch:
print('INFO: Will load the individual parameters from the previous run in directory ' + self.individual_parameter_output_dir + ' for initialization.')
else:
print('INFO: Will NOT load the individual parameters from the previous run in directory ' + self.individual_parameter_output_dir + ' for initialization.')
if load_shared_parameters_before_first_epoch:
print('INFO: Will load the shared parameter file ' + shared_parameter_filename + ' before computing the first epoch')
else:
print('INFO: Will NOT load the shared parameter file ' + shared_parameter_filename + ' before computing the first epoch')
for iter_epoch in range(iter_offset,self.nr_of_epochs+iter_offset):
if self.verbose_output:
print('Computing epoch ' + str(iter_epoch + 1) + ' of ' + str(iter_offset+self.nr_of_epochs))
cur_running_energy = 0.0
cur_running_sim_energy = 0.0
cur_running_reg_energy = 0.0
cur_running_opt_energy = 0.0
cur_min_energy = None
cur_max_energy = None
cur_min_sim_energy = None
cur_max_sim_energy = None
cur_min_reg_energy = None
cur_max_reg_energy = None
cur_min_opt_energy = None
cur_max_opt_energy = None
for i, sample in enumerate(dataloader, 0):
# get the data from the dataloader
current_source_batch = AdaptVal(sample['ISource'])
current_target_batch = AdaptVal(sample['ITarget'])
# create the optimizer
batch_size = current_source_batch.size()
if (batch_size != last_batch_size) and (last_batch_size is not None):
raise ValueError('Ooops, this should not have happened.')
initialize_optimizer = False
if (batch_size != last_batch_size) or (self.ssOpt is None):
initialize_optimizer = True
# we need to create a new optimizer; otherwise optimizer already exists
self.ssOpt = self._create_single_scale_optimizer(batch_size)
# images need to be set before calling _set_all_still_missing_parameters
self.ssOpt.set_source_image(current_source_batch)
self.ssOpt.set_target_image(current_target_batch)
self.ssOpt.set_current_epoch(iter_epoch)
if initialize_optimizer:
# to make sure we have the model initialized, force parameter installation
self.ssOpt._set_all_still_missing_parameters()
# use an external scheduler, since the optimization is chunked into per-batch sub-optimizations
self.ssOpt._set_use_external_scheduler()
if self.show_sample_optimizer_output:
self.ssOpt.turn_iteration_output_on()
else:
self.ssOpt.turn_iteration_output_off()
if self.use_step_size_scheduler and self.scheduler is None:
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.ssOpt.optimizer_instance, 'min',
verbose=self.scheduler_verbose,
factor=self.scheduler_factor,
patience=self.scheduler_patience)
if load_shared_parameters_before_first_epoch:
print('Loading the shared parameters/state.')
self.ssOpt.load_shared_state_dict(torch.load(shared_parameter_filename))
last_batch_size = batch_size
if iter_epoch!=0 or load_individual_parameters_during_first_epoch: # only load the individual parameters after the first epoch
if 'individual_parameter' in sample:
current_individual_parameters = sample['individual_parameter']
if current_individual_parameters is not None:
if self.verbose_output:
print('INFO: loading current individual optimizer state')
self.ssOpt.set_sgd_individual_model_parameters_and_optimizer_states(current_individual_parameters)
else:
print('WARNING: could not find previous parameter file')
else:
# this is the case when optimization is run for the first time for a batch or if previous results should not be used
# In this case we want to have a fresh start for the initial conditions
par_file = os.path.join(self.individual_parameter_output_dir,'default_init.pt')
if i==0:
# this is the first time, so we store the individual parameters
torch.save(self.ssOpt.get_individual_model_parameters(),par_file)
else:
# now we load them
if self.verbose_output:
print('INFO: forcing the initial individual parameters to default')
self.ssOpt.set_individual_model_parameters(torch.load(par_file))
# and we need to kill the optimizer state (to get rid of the previous momentum)
if self.also_eliminate_shared_state_between_samples_during_first_epoch:
if self.verbose_output:
print('INFO: discarding the entire optimizer state')
self.ssOpt.optimizer_instance.state = defaultdict(dict)
else:
if self.verbose_output:
print('INFO: discarding current *individual* optimizer states only')
self.ssOpt._remove_state_variables_for_individual_parameters(self.ssOpt.get_sgd_individual_model_parameters_and_optimizer_states())
if self.visualize:
if i == 0:
# to avoid excessive graphical output
self.ssOpt.turn_visualization_on()
else:
self.ssOpt.turn_visualization_off()
else:
self.ssOpt.turn_visualization_off()
self.ssOpt.optimize()
cur_energy,cur_sim_energy,cur_reg_energy = self.ssOpt.get_energy()
cur_opt_energy = self.ssOpt.get_opt_par_energy()
cur_running_energy += 1./nr_of_samples*cur_energy
cur_running_sim_energy += 1./nr_of_samples*cur_sim_energy
cur_running_reg_energy += 1./nr_of_samples*cur_reg_energy
cur_running_opt_energy += 1./nr_of_samples*cur_opt_energy
if i==0:
cur_min_energy = cur_energy
cur_max_energy = cur_energy
cur_min_sim_energy = cur_sim_energy
cur_max_sim_energy = cur_sim_energy
cur_min_reg_energy = cur_reg_energy
cur_max_reg_energy = cur_reg_energy
cur_min_opt_energy = cur_opt_energy
cur_max_opt_energy = cur_opt_energy
else:
cur_min_energy = min(cur_energy,cur_min_energy)
cur_max_energy = max(cur_energy,cur_max_energy)
cur_min_sim_energy = min(cur_sim_energy,cur_min_sim_energy)
cur_max_sim_energy = max(cur_sim_energy,cur_max_sim_energy)
cur_min_reg_energy = min(cur_reg_energy,cur_min_reg_energy)
cur_max_reg_energy = max(cur_reg_energy,cur_max_reg_energy)
cur_min_opt_energy = min(cur_opt_energy,cur_min_opt_energy)
cur_max_opt_energy = max(cur_opt_energy,cur_max_opt_energy)
# need to save this index by index so we can shuffle
self.ssOpt._write_out_individual_parameters(self.ssOpt.get_sgd_individual_model_parameters_and_optimizer_states(),sample['individual_parameter_filename'])
if self.checkpoint_interval>0:
if (iter_epoch%self.checkpoint_interval==0) or (iter_epoch==self.nr_of_epochs+iter_offset-1):
if self.verbose_output:
print('Writing out individual checkpoint data for epoch ' + str(iter_epoch) + ' for sample ' + str(i+1) + '/' + str(nr_of_samples))
individual_filenames = self._get_individual_checkpoint_filenames(self.individual_checkpoint_output_directory,sample['idx'],iter_epoch)
self.ssOpt._write_out_individual_parameters(self.ssOpt.get_sgd_individual_model_parameters_and_optimizer_states(),individual_filenames)
if i==nr_of_samples-1:
if self.verbose_output:
print('Writing out shared checkpoint data for epoch ' + str(iter_epoch))
shared_filename = self._get_shared_checkpoint_filename(self.shared_checkpoint_output_directory,iter_epoch)
self.ssOpt._write_out_shared_parameters(self.ssOpt.get_sgd_shared_model_parameters(),shared_filename)
if self.show_sample_optimizer_output:
if (last_energy is not None) and (last_sim_energy is not None) and (last_reg_energy is not None):
print('\n\nEpoch {:05d}: Last energies : E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}], optE=[{:2.5f}]'\
.format(iter_epoch-1,last_energy,last_sim_energy,last_reg_energy,last_opt_energy))
print(' / image: Last energies : E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}]' \
.format(last_energy/batch_size[0], last_sim_energy/batch_size[0], last_reg_energy/batch_size[0]))
else:
print('\n\n')
last_energy = cur_running_energy
last_sim_energy = cur_running_sim_energy
last_reg_energy = cur_running_reg_energy
last_opt_energy = cur_running_opt_energy
if self.show_sample_optimizer_output:
print('Epoch {:05d}: Current energies: E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}], optE=[{:2.5f}]'\
.format(iter_epoch,last_energy, last_sim_energy,last_reg_energy,last_opt_energy))
print(' / image: Current energies: E=[{:2.5f}], simE=[{:2.5f}], regE=[{:2.5f}]' \
.format(last_energy/batch_size[0], last_sim_energy/batch_size[0], last_reg_energy/batch_size[0]))
else:
print('Epoch {:05d}: Current energies: E={:2.5f}:[{:1.2f},{:1.2f}], simE={:2.5f}:[{:1.2f},{:1.2f}], regE={:2.5f}:[{:1.2f},{:1.2f}], optE={:1.2f}:[{:1.2f},{:1.2f}]'\
.format(iter_epoch, last_energy, cur_min_energy, cur_max_energy,
last_sim_energy, cur_min_sim_energy, cur_max_sim_energy,
last_reg_energy, cur_min_reg_energy, cur_max_reg_energy,
last_opt_energy, cur_min_opt_energy, cur_max_opt_energy))
print(' / image: Current energies: E={:2.5f}:[{:1.2f},{:1.2f}], simE={:2.5f}:[{:1.2f},{:1.2f}], regE={:2.5f}:[{:1.2f},{:1.2f}]' \
.format(last_energy/batch_size[0], cur_min_energy/batch_size[0], cur_max_energy/batch_size[0],
last_sim_energy/batch_size[0], cur_min_sim_energy/batch_size[0], cur_max_sim_energy/batch_size[0],
last_reg_energy/batch_size[0], cur_min_reg_energy/batch_size[0], cur_max_reg_energy/batch_size[0]))
if self.show_sample_optimizer_output:
print('\n\n')
if self.use_step_size_scheduler:
self.scheduler.step(last_energy)
print('Writing out shared parameter/state file to ' + shared_parameter_filename )
torch.save(self.ssOpt.shared_state_dict(),shared_parameter_filename)
class SingleScaleConsensusRegistrationOptimizer(ImageRegistrationOptimizer):
def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None):
super(SingleScaleConsensusRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
self.params[('optimizer', {}, 'optimizer settings')]
cparams = self.params['optimizer']
cparams[('consensus_settings', {}, 'settings for the consensus optimizer')]
cparams = cparams['consensus_settings']
self.sigma = cparams[('sigma', 1.0, 'sigma/2 is multiplier for squared augmented Lagrangian penalty')]
"""Multiplier for squared augmented Lagrangian penalty"""
self.nr_of_epochs = cparams[('nr_of_epochs', 1, 'how many iterations for consensus; i.e., how often to iterate over the entire dataset')]
"""how many iterations for consensus; i.e., how often to iterate over the entire dataset"""
self.batch_size = cparams[('batch_size',1,'how many images per batch (if set larger or equal to the number of images, it will be processed as one batch)')]
"""how many images per batch"""
self.save_intermediate_checkpoints = cparams[('save_intermediate_checkpoints',False,'when set to True checkpoints are retained for each batch iterations')]
"""when set to True checkpoints are retained for each batch iterations"""
self.checkpoint_output_directory = cparams[('checkpoint_output_directory','checkpoints','directory where the checkpoints will be stored')]
"""output directory where the checkpoints will be saved"""
self.save_consensus_state_checkpoints = cparams[('save_consensus_state_checkpoints',True,'saves the current consensus state; typically only the individual states are saved as checkpoints')]
"""saves the current consensus state; typically only the individual states are saved as checkpoints"""
self.continue_from_last_checkpoint = cparams[('continue_from_last_checkpoint',False,'If true then iterations are resumed from last checkpoint. Allows restarting an optimization')]
"""allows restarting an optimization by continuing from the last checkpoint"""
self.load_optimizer_state_from_checkpoint = cparams[('load_optimizer_state_from_checkpoint',True,'If set to False only the state of the model is loaded when resuming from a checkpoint')]
"""If set to False only the state of the model is loaded when resuming from a checkpoint"""
self.nr_of_batches = None
self.nr_of_images = None
self.current_consensus_state = None
self.current_consensus_dual = None
self.next_consensus_state = None
self.last_shared_state = None
self.model_name = None
self.add_model_name = None
self.add_model_networkClass = None
self.add_model_lossClass = None
self.addSimName = None
self.addSimMeasure = None
self.iter_offset = None
self.ssOpt = None
def write_parameters_to_settings(self):
if self.ssOpt is not None:
self.ssOpt.write_parameters_to_settings()
def _consensus_penalty_loss(self,shared_model_parameters):
"""
This allows defining additional terms for the loss which are based on parameters that are shared
between models (for example, for the smoother). Can be used to define a form of consensus optimization.
:param shared_model_parameters: parameters that have been declared shared in a model
:return: 0 by default, otherwise the corresponding penalty
"""
additional_loss = MyTensor(1).zero_()
total_number_of_parameters = 1
for k in shared_model_parameters:
total_number_of_parameters += shared_model_parameters[k].numel()
additional_loss += ((shared_model_parameters[k]\
-self.current_consensus_state[k]\
-self.current_consensus_dual[k])**2).sum()
additional_loss *= self.sigma/(2.0*total_number_of_parameters)
#print('sigma=' + str(self.sigma) + '; additional loss = ' + str( additional_loss.data.cpu().numpy()))
return additional_loss
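# In ADMM-style notation the penalty above is sigma/(2*N) * sum_k ||theta_k - z_k - u_k||^2,
# where theta_k are the shared model parameters, z_k the current consensus state, u_k the dual
# variables, and N is one plus the total number of shared parameter values (as computed above).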
def _set_state_to_zero(self,state):
# set all the individual parameters to zero
for k in state:
state[k].zero_()
def _add_scaled_difference_to_state(self,state,model_shared_state,current_dual,scaling_factor):
for k in state:
state[k] += scaling_factor*(model_shared_state[k]-current_dual[k])
def _create_single_scale_optimizer(self,batch_size,consensus_penalty):
ssOpt = SingleScaleRegistrationOptimizer(batch_size, self.spacing, self.useMap, self.mapLowResFactor, self.params, compute_inverse_map=self.compute_inverse_map, default_learning_rate=self.default_learning_rate)
if ((self.add_model_name is not None) and
(self.add_model_networkClass is not None) and
(self.add_model_lossClass is not None)):
ssOpt.add_model(self.add_model_name, self.add_model_networkClass, self.add_model_lossClass)
# now set the actual model we want to solve
ssOpt.set_model(self.model_name)
if (self.addSimName is not None) and (self.addSimMeasure is not None):
ssOpt.add_similarity_measure(self.addSimName, self.addSimMeasure)
# setting the optimizer
#if self.optimizer is not None:
# ssOpt.set_optimizer(self.optimizer)
# ssOpt.set_optimizer_params(self.optimizer_params)
#elif self.optimizer_name is not None:
if self.optimizer_name is not None:
ssOpt.set_optimizer_by_name(self.optimizer_name)
else:
raise ValueError('Optimizers need to be specified by name for consensus optimization at the moment.')
ssOpt.set_rel_ftol(self.get_rel_ftol())
ssOpt.set_visualization(self.get_visualization())
ssOpt.set_visualize_step(self.get_visualize_step())
if consensus_penalty:
ssOpt.set_external_optimizer_parameter_loss(self._consensus_penalty_loss)
return ssOpt
def _initialize_consensus_variables_if_needed(self,ssOpt):
if self.current_consensus_state is None:
self.current_consensus_state = copy.deepcopy(ssOpt.get_shared_model_parameters())
self._set_state_to_zero(self.current_consensus_state)
if self.current_consensus_dual is None:
self.current_consensus_dual = copy.deepcopy(self.current_consensus_state)
self._set_state_to_zero(self.current_consensus_dual)
if self.last_shared_state is None:
self.last_shared_state = copy.deepcopy(self.current_consensus_state)
self._set_state_to_zero(self.last_shared_state)
if self.next_consensus_state is None:
self.next_consensus_state = copy.deepcopy(self.current_consensus_dual) # also make it zero
self._set_state_to_zero(self.next_consensus_state)
def add_similarity_measure(self, simName, simMeasure):
"""
Adds a custom similarity measure
:param simName: name of the similarity measure (string)
:param simMeasure: the similarity measure itself (an object that can be instantiated)
"""
self.addSimName = simName
self.addSimMeasure = simMeasure
def set_model(self, modelName):
"""
Sets the model that should be solved
:param modelName: name of the model that should be solved (string)
"""
self.model_name = modelName
def add_model(self, add_model_name, add_model_networkClass, add_model_lossClass):
"""
Adds a custom model to be optimized over
:param add_model_name: name of the model (string)
:param add_model_networkClass: network model itself (as an object that can be instantiated)
:param add_model_lossClass: loss of the model (as an object that can be instantiated)
"""
self.add_model_name = add_model_name
self.add_model_networkClass = add_model_networkClass
self.add_model_lossClass = add_model_lossClass
def get_checkpoint_dict(self):
d = super(SingleScaleConsensusRegistrationOptimizer, self).get_checkpoint_dict()
d['consensus_dual'] = self.current_consensus_dual
return d
def load_checkpoint_dict(self, d, load_optimizer_state=False):
super(SingleScaleConsensusRegistrationOptimizer, self).load_checkpoint_dict(d)
if 'consensus_dual' in d:
self.current_consensus_dual = d['consensus_dual']
else:
raise ValueError('checkpoint does not contain: consensus_dual')
def _custom_load_checkpoint(self,ssOpt,filename):
d = torch.load(filename)
ssOpt.load_checkpoint_dict(d)
self.load_checkpoint_dict(d)
def _custom_single_batch_load_checkpoint(self,ssOpt,filename):
d = torch.load(filename)
if self.load_optimizer_state_from_checkpoint:
ssOpt.load_checkpoint_dict(d,load_optimizer_state=True)
def _custom_save_checkpoint(self,ssOpt,filename):
sd = ssOpt.get_checkpoint_dict()
# todo: maybe make this optional to save storage
sd['res'] = dict()
sd['res']['Iw'] = ssOpt.get_warped_image()
sd['res']['phi'] = ssOpt.get_map()
cd = self.get_checkpoint_dict()
# now merge these two dictionaries
sd.update(cd)
# and now save it
torch.save(sd,filename)
def _copy_state(self,state_to,state_from):
for key in state_to:
if key in state_from:
state_to[key].copy_(state_from[key])
else:
raise ValueError('Could not copy key ' + key)
def _set_all_still_missing_parameters(self):
if self.model_name is None:
model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map', "['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'")]
self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False' )]
self.set_model( model_name )
if self.optimizer_name is None:
self.optimizer_name = self.params['optimizer'][('name','lbfgs_ls','Optimizer (lbfgs|adam|sgd)')]
self.optimizer_has_been_initialized = True
def get_warped_image(self):
"""
Returns the warped image
:return: the warped image
"""
p = dict()
p['warped_images'] = []
for current_batch in range(self.nr_of_batches):
current_checkpoint_filename = self._get_checkpoint_filename(current_batch, self.iter_offset+self.nr_of_epochs - 1)
dc = torch.load(current_checkpoint_filename)
p['warped_images'].append(dc['res']['Iw'])
return p
def get_map(self):
"""
Returns the deformation map
:return: deformation map
"""
p = dict()
p['phi'] = []
for current_batch in range(self.nr_of_batches):
current_checkpoint_filename = self._get_checkpoint_filename(current_batch, self.iter_offset+self.nr_of_epochs - 1)
dc = torch.load(current_checkpoint_filename)
p['phi'].append(dc['res']['phi'])
return p
def get_model_parameters(self):
"""
Returns the parameters of the model
:return: model parameters
"""
p = dict()
p['consensus_state'] = self.current_consensus_state
p['registration_pars'] = []
for current_batch in range(self.nr_of_batches):
current_checkpoint_filename = self._get_checkpoint_filename(current_batch,self.iter_offset+self.nr_of_epochs-1)
dc = torch.load(current_checkpoint_filename)
d = dict()
d['model'] = dc['model']
d['consensus_dual'] = dc['consensus_dual']
p['registration_pars'].append(d)
return p
def set_model_parameters(self, p):
raise ValueError('Setting model parameters not yet supported by consensus optimizer')
def _get_checkpoint_filename(self,batch_nr,batch_iter):
if self.save_intermediate_checkpoints:
return os.path.join(self.checkpoint_output_directory,
"checkpoint_batch{:05d}_iter{:05d}.pt".format(batch_nr,batch_iter))
else:
return os.path.join(self.checkpoint_output_directory,
"checkpoint_batch{:05d}.pt".format(batch_nr))
def _get_consensus_checkpoint_filename(self,batch_iter):
return os.path.join(self.checkpoint_output_directory,
"consensus_state_iter{:05d}.pt".format(batch_iter))
def _optimize_as_single_batch(self,resume_from_iter=None):
"""
Does optimization where everything is represented as a single batch. This is essentially like an individual
optimization, but supports checkpointing.
:param resume_from_iter: resumes computations from this iteration (assumes the corresponding checkpoint exists here)
:return: n/a
"""
if resume_from_iter is not None:
self.iter_offset = resume_from_iter+1
print('Resuming from checkpoint iteration: ' + str(resume_from_iter))
else:
self.iter_offset = 0
for iter_batch in range(self.iter_offset,self.nr_of_epochs+self.iter_offset):
print('Computing epoch ' + str(iter_batch + 1) + ' of ' + str(self.iter_offset+self.nr_of_epochs))
all_histories = []
current_batch = 0 # there is only one batch, this one
current_source_batch = self.ISource[:, ...].data
current_target_batch = self.ITarget[:, ...].data
current_batch_image_size = np.array(current_source_batch.size())
# there is no consensus penalty here as this is technically not consensus optimization
# todo: could ultimately replace the single scale optimizer; here used to write out checkpoints
self.ssOpt = self._create_single_scale_optimizer(current_batch_image_size, consensus_penalty=False)
# needs to be set before calling _set_all_still_missing_parameters
self.ssOpt.set_source_image(current_source_batch)
self.ssOpt.set_target_image(current_target_batch)
# to make sure we have the model initialized, force parameter installation
self.ssOpt._set_all_still_missing_parameters()
# this loads the optimizer state and the model state, but here not the self.current_consensus_dual
if iter_batch>0:
previous_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch - 1)
self._custom_single_batch_load_checkpoint(self.ssOpt, previous_checkpoint_filename)
self.ssOpt.optimize()
if (current_batch == self.nr_of_batches - 1) and (iter_batch == self.nr_of_epochs - 1):
# the last time we run this
all_histories.append(self.ssOpt.get_history())
current_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch)
self._custom_save_checkpoint(self.ssOpt, current_checkpoint_filename)
self._add_to_history('batch_history', copy.deepcopy(all_histories))
def _optimize_with_multiple_batches(self, resume_from_iter=None):
"""
Does consensus optimization over multiple batches.
:param resume_from_iter: resumes computations from this iteration (assumes the corresponding checkpoint exists here)
:return: n/a
"""
if resume_from_iter is not None:
iter_offset = resume_from_iter+1
print('Resuming from checkpoint iteration: ' + str(resume_from_iter))
else:
iter_offset = 0
for iter_batch in range(iter_offset,self.nr_of_epochs+iter_offset):
print('Computing epoch ' + str(iter_batch+1) + ' of ' + str(iter_offset+self.nr_of_epochs))
next_consensus_initialized = False
all_histories = []
for current_batch in range(self.nr_of_batches):
from_image = current_batch*self.batch_size
to_image = min(self.nr_of_images,(current_batch+1)*self.batch_size)
nr_of_images_in_batch = to_image-from_image
current_source_batch = self.ISource[from_image:to_image, ...].data
current_target_batch = self.ITarget[from_image:to_image, ...].data
current_batch_image_size = np.array(current_source_batch.size())
print('Computing image pair batch ' + str(current_batch+1) + ' of ' + str(self.nr_of_batches) +
' of batch iteration ' + str(iter_batch+1) + ' of ' + str(iter_offset+self.nr_of_epochs))
print('Image range: [' + str(from_image) + ',' + str(to_image) + ')')
# create new optimizer
if iter_batch==0:
# do not apply the penalty the first time around
self.ssOpt = self._create_single_scale_optimizer(current_batch_image_size,consensus_penalty=False)
else:
self.ssOpt = self._create_single_scale_optimizer(current_batch_image_size,consensus_penalty=True)
# to make sure we have the model initialized, force parameter installation
self.ssOpt._set_all_still_missing_parameters()
if iter_batch==0:
# in the first round just initialize the shared state with what was computed previously
if self.last_shared_state is not None:
self.ssOpt.set_shared_model_parameters(self.last_shared_state)
self._initialize_consensus_variables_if_needed(self.ssOpt)
if not next_consensus_initialized:
self._set_state_to_zero(self.next_consensus_state)
next_consensus_initialized = True
if iter_batch==0:
# for the first time, just set the dual to zero
self._set_state_to_zero(self.current_consensus_dual)
# load the last
else:
# this loads the optimizer state and the model state and also self.current_consensus_dual
previous_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch-1)
self._custom_load_checkpoint(self.ssOpt,previous_checkpoint_filename)
# first update the dual variable (we do this now, while we still have the current consensus state)
self._add_scaled_difference_to_state(self.current_consensus_dual,
self.ssOpt.get_shared_model_parameters(),
self.current_consensus_state,-1.0)
self.ssOpt.set_source_image(current_source_batch)
self.ssOpt.set_target_image(current_target_batch)
self.ssOpt.optimize()
self._copy_state(self.last_shared_state,self.ssOpt.get_shared_model_parameters())
if (current_batch==self.nr_of_batches-1) and (iter_batch==self.nr_of_epochs-1):
# the last time we run this
all_histories.append( self.ssOpt.get_history() )
# update the consensus state (is done via next_consensus_state as
# self.current_consensus_state is used as part of the optimization for all optimizations in the batch)
self._add_scaled_difference_to_state(self.next_consensus_state,
self.ssOpt.get_shared_model_parameters(),
self.current_consensus_dual,float(nr_of_images_in_batch)/float(self.nr_of_images))
current_checkpoint_filename = self._get_checkpoint_filename(current_batch, iter_batch)
self._custom_save_checkpoint(self.ssOpt,current_checkpoint_filename)
self._add_to_history('batch_history', copy.deepcopy(all_histories))
self._copy_state(self.current_consensus_state, self.next_consensus_state)
if self.save_consensus_state_checkpoints:
consensus_filename = self._get_consensus_checkpoint_filename(iter_batch)
torch.save({'consensus_state':self.current_consensus_state},consensus_filename)
def _get_checkpoint_iter_with_complete_batch(self,start_at_iter):
if start_at_iter<0:
print('Could NOT find a complete checkpoint batch.')
return None
is_complete_batch = True
for current_batch in range(self.nr_of_batches):
cfilename = self._get_checkpoint_filename(current_batch, start_at_iter)
if os.path.isfile(cfilename):
print('Checkpoint file: ' + cfilename + " exists.")
else:
print('Checkpoint file: ' + cfilename + " does NOT exist.")
is_complete_batch = False
break
if is_complete_batch:
print('Found complete batch for batch iteration ' + str(start_at_iter))
return start_at_iter
else:
return self._get_checkpoint_iter_with_complete_batch(start_at_iter-1)
def _get_last_checkpoint_iteration_from_checkpoint_files(self):
"""
Looks through the checkpoint files and checks which ones were the last saved ones.
This allows for picking up the iterations after a completed or terminated optimization.
Also checks that the same number of batches are used, otherwise an optimization cannot be resumed
from a checkpoint.
:return: last iteration performed for complete batch
"""
print('Attempting to resume optimization from checkpoint data.')
print('Searching for existing checkpoint data ...')
# first find all the computed iters
largest_found_iter = None
if self.save_intermediate_checkpoints:
current_iter_batch = 0
while os.path.isfile(self._get_checkpoint_filename(0,current_iter_batch)):
print('Found checkpoint iteration: ' + str(current_iter_batch) + ' : ' + self._get_checkpoint_filename(0,current_iter_batch))
largest_found_iter = current_iter_batch
current_iter_batch +=1
else:
if os.path.isfile(self._get_checkpoint_filename(0,0)):
print('Found checkpoint: ' + str(self._get_checkpoint_filename(0,0)))
largest_found_iter = 0
if largest_found_iter is None:
print('Could not find any checkpoint data from which to resume.')
return None
else:
largest_iter_with_complete_batch = self._get_checkpoint_iter_with_complete_batch(largest_found_iter)
return largest_iter_with_complete_batch
def optimize(self):
"""
This optimizer performs consensus optimization:
1) (u_i_shared,u_i_individual)^{k+1} = argmin \sum_i f_i(u_i_shared,u_i_individual) + \sigma/2\|u_i_shared-u_consensus^k-z_i^k\|^2
2) u_consensus^{k+1} = 1/n\sum_{i=1}^n ((u_i_shared)^{k+1}-z_i^k)
3) z_i^{k+1} = z_i^k-((u_i_shared)^{k+1}-u_consensus^{k+1})
:return: n/a
"""
if self.optimizer is not None:
raise ValueError('Custom optimizers are currently not supported for consensus optimization.\
Set the optimizer by name (e.g., in the json configuration) instead.')
self._set_all_still_missing_parameters()
# todo: support reading images from file
self.nr_of_images = self.ISource.size()[0]
self.nr_of_batches = np.ceil(float(self.nr_of_images)/float(self.batch_size)).astype('int')
if self.continue_from_last_checkpoint:
last_checkpoint_iteration = self._get_last_checkpoint_iteration_from_checkpoint_files()
else:
last_checkpoint_iteration = None
if self.nr_of_batches==1:
compute_as_single_batch = True
else:
compute_as_single_batch = False
if not os.path.exists(self.checkpoint_output_directory):
os.makedirs(self.checkpoint_output_directory)
if compute_as_single_batch:
self._optimize_as_single_batch(resume_from_iter=last_checkpoint_iteration)
else:
self._optimize_with_multiple_batches(resume_from_iter=last_checkpoint_iteration)
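# Illustrative sketch (added for exposition; not part of the original optimizer): the
# consensus and dual updates from the optimize() docstring above, written out for one
# scalar shared value per batch. All names and values here are hypothetical.
def _toy_consensus_round(shared_per_batch, duals):
    """Given freshly optimized shared values u_i and the current duals z_i, return the
    new consensus value (step 2) and the updated duals (step 3)."""
    n = len(shared_per_batch)
    consensus = sum(u - z for u, z in zip(shared_per_batch, duals)) / n
    new_duals = [z - (u - consensus) for u, z in zip(shared_per_batch, duals)]
    return consensus, new_duals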
class MultiScaleRegistrationOptimizer(ImageRegistrationOptimizer):
"""
Class to perform multi-scale optimization. Essentially puts a loop around multiple calls of the
single scale optimizer and starts with the registration of downsampled images. When moving up
the hierarchy, the registration parameters are upsampled from the solution at the previous lower resolution
"""
def __init__(self, sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=False, default_learning_rate=None ):
super(MultiScaleRegistrationOptimizer, self).__init__(sz, spacing, useMap, mapLowResFactor, params, compute_inverse_map=compute_inverse_map, default_learning_rate=default_learning_rate)
self.scaleFactors = None
"""At what image scales optimization should be computed"""
self.scaleIterations = None
"""number of iterations per scale"""
self.addSimName = None
"""name of the similarity measure to be added"""
self.addSimMeasure = None
"""similarity measure itself that should be added"""
self.add_model_name = None
"""name of the model that should be added"""
self.add_model_networkClass = None
"""network object of the model to be added"""
self.add_model_lossClass = None
"""loss object of the model to be added"""
self.model_name = None
"""name of the model to be added (if specified by name; gets dominated by specifying an optimizer directly"""
self.ssOpt = None
"""Single scale optimizer"""
self.params['optimizer'][('multi_scale', {}, 'multi scale settings')]
def write_parameters_to_settings(self):
if self.ssOpt is not None:
self.ssOpt.write_parameters_to_settings()
def add_similarity_measure(self, simName, simMeasure):
"""
Adds a custom similarity measure
:param simName: name of the similarity measure (string)
:param simMeasure: the similarity measure itself (an object that can be instantiated)
"""
self.addSimName = simName
self.addSimMeasure = simMeasure
def set_model(self, modelName):
"""
Set the model to be optimized over by name
:param modelName: the name of the model (string)
"""
self.model_name = modelName
def set_initial_map(self, map0, map0_inverse=None):
"""
Sets the initial map (overwrites the default identity map)
:param map0: initial map
:return: n/a
"""
if self.ssOpt is None:
self.initialMap = map0
self.initialInverseMap = map0_inverse
def set_initial_weight_map(self,weight_map,freeze_weight=False):
if self.ssOpt is None:
self.weight_map = weight_map
self.freeze_weight = freeze_weight
def set_pair_name(self,pair_name):
# f = lambda name: os.path.split(name)
# get_in = lambda x: os.path.splitext(f(x)[1])[0]
# get_fn = lambda x: f(f(x)[0])[1]
# get_img_name = lambda x: get_fn(x)+'_'+get_in(x)
# img_pair_name = [get_img_name(pair_name[0])+'_'+get_img_name(pair_name[1]) for pair_name in pair_names]
self.pair_name = pair_name
def set_save_fig_path(self, save_fig_path):
"""
the path where figures are saved; the default is ../data/expr_name
:param save_fig_path:
:return:
"""
self.save_fig_path = os.path.join(save_fig_path, self.expr_name)
def add_model(self, add_model_name, add_model_networkClass, add_model_lossClass, use_map):
"""
Adds a custom model to be optimized over
:param add_model_name: name of the model (string)
:param add_model_networkClass: network model itself (as an object that can be instantiated)
:param add_model_lossClass: loss of the model (as an object that can be instantiated)
:param use_map: if set to True, the model uses a map, otherwise it works directly with the image
"""
self.add_model_name = add_model_name
self.add_model_networkClass = add_model_networkClass
self.add_model_lossClass = add_model_lossClass
self.add_model_use_map = use_map
def set_scale_factors(self, scaleFactors):
"""
Set the scale factors for the solution. Should be in descending order, e.g., [1.0, 0.5, 0.25]
:param scaleFactors: scale factors for the multi-scale solution hierarchy
"""
self.params['optimizer']['multi_scale']['scale_factors'] = (scaleFactors, 'how images are scaled')
self.scaleFactors = scaleFactors
def set_number_of_iterations_per_scale(self, scaleIterations):
"""
Sets the number of iterations that will be performed per scale of the multi-resolution hierarchy. E.g., [50,100,200]
:param scaleIterations: number of iterations per scale (array)
"""
self.params['optimizer']['multi_scale']['scale_iterations'] = (scaleIterations, 'number of iterations per scale')
self.scaleIterations = scaleIterations
def _get_desired_size_from_scale(self, origSz, scale):
osz = np.array(list(origSz))
dsz = osz
dsz[2::] = (np.round( scale*osz[2::] )).astype('int')
return dsz
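# Example of the computation above (illustrative): for an input of size (1, 1, 128, 128)
# in BxCxXxY format and scale=0.5, only the spatial dimensions are rescaled, giving a
# desired size of [1, 1, 64, 64]; the batch and channel dimensions are left untouched.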
def get_energy(self):
"""
Returns the current energy
:return: Returns a tuple (energy, similarity energy, regularization energy)
"""
if self.ssOpt is not None:
return self.ssOpt.get_energy()
else:
return None
def get_warped_image(self):
"""
Returns the warped image
:return: the warped image
"""
if self.ssOpt is not None:
return self.ssOpt.get_warped_image()
else:
return None
def get_warped_label(self):
"""
Returns the warped label
:return: the warped label
"""
if self.ssOpt is not None:
return self.ssOpt.get_warped_label()
else:
return None
def get_map(self):
"""
Returns the deformation map
:return: deformation map
"""
if self.ssOpt is not None:
return self.ssOpt.get_map()
else:
return None
def get_inverse_map(self):
"""
Returns the inverse deformation map
:return: deformation map
"""
if self.ssOpt is not None:
return self.ssOpt.get_inverse_map()
else:
return None
def get_model_parameters(self):
"""
Returns the parameters of the model
:return: model parameters
"""
if self.ssOpt is not None:
return self.ssOpt.get_model_parameters()
else:
return None
def set_model_parameters(self,p):
raise ValueError('Setting model parameters not yet supported for multi-scale optimizer')
def _set_all_still_missing_parameters(self):
self.scaleFactors = self.params['optimizer']['multi_scale'][('scale_factors', [1.0, 0.5, 0.25], 'how images are scaled')]
self.scaleIterations = self.params['optimizer']['multi_scale'][('scale_iterations', [10, 20, 20], 'number of iterations per scale')]
if (self.optimizer is None) and (self.optimizer_name is None):
self.optimizer_name = self.params['optimizer'][('name','lbfgs_ls','Optimizer (lbfgs|adam|sgd)')]
if self.model_name is None:
model_name = self.params['model']['registration_model'][('type', 'lddmm_shooting_map', "['svf'|'svf_quasi_momentum'|'svf_scalar_momentum'|'svf_vector_momentum'|'lddmm_shooting'|'lddmm_shooting_scalar_momentum'] all with suffix '_map' or '_image'")]
self.params['model']['deformation'][('use_map', True, 'use a map for the solution or not True/False' )]
self.set_model( model_name )
self.optimizer_has_been_initialized = True
def optimize(self):
"""
Perform the actual multi-scale optimization
"""
self._set_all_still_missing_parameters()
if (self.ISource is None) or (self.ITarget is None):
raise ValueError('Source and target images need to be set first')
upsampledParameters = None
upsampledParameterSpacing = None
upsampledSz = None
lastSuccessfulStepSizeTaken = None
nrOfScales = len(self.scaleFactors)
# check that we have the right number of iteration parameters
assert (nrOfScales == len(self.scaleIterations))
print('Performing multi-scale optimization with scales: ' + str(self.scaleFactors))
# go from lowest to highest scale
reverseScales = self.scaleFactors[-1::-1]
reverseIterations = self.scaleIterations[-1::-1]
over_scale_iter_count = 0
for en_scale in enumerate(reverseScales):
print('Optimizing for scale = ' + str(en_scale[1]))
# create the images
currentScaleFactor = en_scale[1]
currentScaleNumber = en_scale[0]
currentDesiredSz = self._get_desired_size_from_scale(self.ISource.size(), currentScaleFactor)
currentNrOfIteratons = reverseIterations[currentScaleNumber]
ISourceC, spacingC = self.sampler.downsample_image_to_size(self.ISource, self.spacing, currentDesiredSz[2::],self.spline_order)
ITargetC, spacingC = self.sampler.downsample_image_to_size(self.ITarget, self.spacing, currentDesiredSz[2::],self.spline_order)
LSourceC = None
LTargetC = None
if self.LSource is not None and self.LTarget is not None:
LSourceC, spacingC = self.sampler.downsample_image_to_size(self.LSource, self.spacing, currentDesiredSz[2::],0)
LTargetC, spacingC = self.sampler.downsample_image_to_size(self.LTarget, self.spacing, currentDesiredSz[2::],0)
initialMap = None
initialInverseMap = None
weight_map=None
if self.initialMap is not None:
initialMap,_ = self.sampler.downsample_image_to_size(self.initialMap,self.spacing, currentDesiredSz[2::],1,zero_boundary=False)
if self.initialInverseMap is not None:
initialInverseMap,_ = self.sampler.downsample_image_to_size(self.initialInverseMap,self.spacing, currentDesiredSz[2::],1,zero_boundary=False)
if self.weight_map is not None:
weight_map,_ =self.sampler.downsample_image_to_size(self.weight_map,self.spacing, currentDesiredSz[2::],1,zero_boundary=False)
szC = np.array(ISourceC.size()) # this assumes the BxCxXxYxZ format
mapLowResFactor = None if currentScaleNumber==0 else self.mapLowResFactor
self.ssOpt = SingleScaleRegistrationOptimizer(szC, spacingC, self.useMap, mapLowResFactor, self.params, compute_inverse_map=self.compute_inverse_map,default_learning_rate=self.default_learning_rate)
print('Setting learning rate to ' + str( lastSuccessfulStepSizeTaken ))
self.ssOpt.set_last_successful_step_size_taken( lastSuccessfulStepSizeTaken )
self.ssOpt.set_initial_map(initialMap,initialInverseMap)
if ((self.add_model_name is not None) and
(self.add_model_networkClass is not None) and
(self.add_model_lossClass is not None)):
self.ssOpt.add_model(self.add_model_name, self.add_model_networkClass, self.add_model_lossClass, use_map=self.add_model_use_map)
# now set the actual model we want to solve
self.ssOpt.set_model(self.model_name)
if weight_map is not None:
self.ssOpt.set_initial_weight_map(weight_map,self.freeze_weight)
if (self.addSimName is not None) and (self.addSimMeasure is not None):
self.ssOpt.add_similarity_measure(self.addSimName, self.addSimMeasure)
# setting the optimizer
if self.optimizer is not None:
self.ssOpt.set_optimizer(self.optimizer)
self.ssOpt.set_optimizer_params(self.optimizer_params)
elif self.optimizer_name is not None:
self.ssOpt.set_optimizer_by_name(self.optimizer_name)
self.ssOpt.set_rel_ftol(self.get_rel_ftol())
self.ssOpt.set_visualization(self.get_visualization())
self.ssOpt.set_visualize_step(self.get_visualize_step())
self.ssOpt.set_n_scale(en_scale[1])
self.ssOpt.set_over_scale_iter_count(over_scale_iter_count)
if self.get_save_fig():
self.ssOpt.set_expr_name(self.get_expr_name())
self.ssOpt.set_save_fig(self.get_save_fig())
self.ssOpt.set_save_fig_path(self.get_save_fig_path())
self.ssOpt.set_save_fig_num(self.get_save_fig_num())
self.ssOpt.set_pair_name(self.get_pair_name())
self.ssOpt.set_n_scale(en_scale[1])
self.ssOpt.set_source_label(self.get_source_label())
self.ssOpt.set_target_label(self.get_target_label())
self.ssOpt.set_source_image(ISourceC)
self.ssOpt.set_target_image(ITargetC)
self.ssOpt.set_multi_scale_info(self.ISource,self.ITarget,self.spacing,self.LSource,self.LTarget)
if self.LSource is not None and self.LTarget is not None:
self.ssOpt.set_source_label(LSourceC)
self.ssOpt.set_target_label(LTargetC)
if upsampledParameters is not None:
# check that the upsampled parameters are consistent with the downsampled images
spacingError = False
expectedSpacing = None
if mapLowResFactor is not None:
expectedSpacing = utils._get_low_res_spacing_from_spacing(spacingC, szC, upsampledSz)
# the spacing of the upsampled parameters will be different
if not (abs(expectedSpacing - upsampledParameterSpacing) < 0.000001).all():
spacingError = True
elif not (abs(spacingC - upsampledParameterSpacing) < 0.000001).all():
expectedSpacing = spacingC
spacingError = True
if spacingError:
print(expectedSpacing)
print(upsampledParameterSpacing)
raise ValueError('Upsampled parameters and downsampled images are of inconsistent dimension')
# now that everything is fine, we can use the upsampled parameters
print('Explicitly setting the optimization parameters')
self.ssOpt.set_model_parameters(upsampledParameters)
# do the actual optimization
print('Optimizing for at most ' + str(currentNrOfIteratons) + ' iterations')
self.ssOpt._set_number_of_iterations_from_multi_scale(currentNrOfIteratons)
self.ssOpt.optimize()
self._add_to_history('scale_nr',currentScaleNumber)
self._add_to_history('scale_factor',currentScaleFactor)
self._add_to_history('ss_history',self.ssOpt.get_history())
lastSuccessfulStepSizeTaken = self.ssOpt.get_last_successful_step_size_taken()
over_scale_iter_count += currentNrOfIteratons
# if we are not at the very last scale, then upsample the parameters
if currentScaleNumber != nrOfScales - 1:
# we need to revert the downsampling to the next higher level
scaleTo = reverseScales[currentScaleNumber + 1]
upsampledSz = self._get_desired_size_from_scale(self.ISource.size(), scaleTo)
print('Before')
print(upsampledSz)
if self.useMap:
if self.mapLowResFactor is not None:
# parameters are upsampled differently here, because they are computed at low res
upsampledSz = utils._get_low_res_size_from_size(upsampledSz,self.mapLowResFactor)
print(self.mapLowResFactor)
print('After')
print(upsampledSz)
upsampledParameters, upsampledParameterSpacing = self.ssOpt.upsample_model_parameters(upsampledSz[2::])
| [
"torch.is_tensor",
"torch.save",
"torch.optim.SGD",
"torch.from_numpy",
"torch.utils.data.DataLoader",
"torch.load",
"torch.optim.lr_scheduler.ReduceLROnPlateau"
] | 1.7 | HastingsGreer/mermaid | ba07883cc3cb5982e4655048a434b4495cb49c6d |
1.0 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import json
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from .file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required
from .trainer_utils import EvaluationStrategy
from .utils import logging
if is_torch_available():
import torch
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
def default_logdir() -> str:
"""
Same default as PyTorch
"""
import socket
from datetime import datetime
current_time = datetime.now().strftime("%b%d_%H-%M-%S")
return os.path.join("runs", current_time + "_" + socket.gethostname())
@dataclass
class TrainingArguments:
"""
TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop
itself**.
Using :class:`~transformers.HfArgumentParser` we can turn this class into argparse arguments to be able to specify
them on the command line.
Parameters:
output_dir (:obj:`str`):
The output directory where the model predictions and checkpoints will be written.
overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`):
If :obj:`True`, overwrite the content of the output directory. Use this to continue training if
:obj:`output_dir` points to a checkpoint directory.
do_train (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run training or not. This argument is not directly used by :class:`~transformers.Trainer`, it's
intended to be used by your training/evaluation scripts instead. See the `example scripts
<https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.
do_eval (:obj:`bool`, `optional`):
Whether to run evaluation on the dev set or not. Will be set to :obj:`True` if :obj:`evaluation_strategy`
is different from :obj:`"no"`. This argument is not directly used by :class:`~transformers.Trainer`, it's
intended to be used by your training/evaluation scripts instead. See the `example scripts
<https://github.com/huggingface/transformers/tree/master/examples>`__ for more details.
do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to run predictions on the test set or not. This argument is not directly used by
:class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See
the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more
details.
evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`):
The evaluation strategy to adopt during training. Possible values are:
* :obj:`"no"`: No evaluation is done during training.
* :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`.
* :obj:`"epoch"`: Evaluation is done at the end of each epoch.
prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):
When performing evaluation and predictions, only returns the loss.
per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for training.
per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):
The batch size per GPU/TPU core/CPU for evaluation.
gradient_accumulation_steps (:obj:`int`, `optional`, defaults to 1):
Number of updates steps to accumulate the gradients for, before performing a backward/update pass.
.. warning::
When using gradient accumulation, one step is counted as one step with backward pass. Therefore,
logging, evaluation, save will be conducted every ``gradient_accumulation_steps * xxx_step`` training
examples.
eval_accumulation_steps (:obj:`int`, `optional`):
Number of predictions steps to accumulate the output tensors for, before moving the results to the CPU. If
left unset, the whole predictions are accumulated on GPU/TPU before being moved to the CPU (faster but
requires more memory).
learning_rate (:obj:`float`, `optional`, defaults to 5e-5):
The initial learning rate for Adam.
weight_decay (:obj:`float`, `optional`, defaults to 0):
The weight decay to apply (if not zero).
adam_beta1 (:obj:`float`, `optional`, defaults to 0.9):
The beta1 for the Adam optimizer.
adam_beta2 (:obj:`float`, `optional`, defaults to 0.999):
The beta2 for the Adam optimizer.
adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):
Epsilon for the Adam optimizer.
max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):
Maximum gradient norm (for gradient clipping).
num_train_epochs (:obj:`float`, `optional`, defaults to 3.0):
Total number of training epochs to perform (if not an integer, will perform the decimal part percents of
the last epoch before stopping training).
max_steps (:obj:`int`, `optional`, defaults to -1):
If set to a positive number, the total number of training steps to perform. Overrides
:obj:`num_train_epochs`.
warmup_steps (:obj:`int`, `optional`, defaults to 0):
Number of steps used for a linear warmup from 0 to :obj:`learning_rate`.
logging_dir (:obj:`str`, `optional`):
Tensorboard log directory. Will default to `runs/**CURRENT_DATETIME_HOSTNAME**`.
logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to log and evaluate the first :obj:`global_step` or not.
logging_steps (:obj:`int`, `optional`, defaults to 500):
Number of update steps between two logs.
save_steps (:obj:`int`, `optional`, defaults to 500):
Number of updates steps before two checkpoint saves.
save_total_limit (:obj:`int`, `optional`):
If a value is passed, will limit the total amount of checkpoints. Deletes the older checkpoints in
:obj:`output_dir`.
no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to not use CUDA even when it is available or not.
seed (:obj:`int`, `optional`, defaults to 42):
Random seed for initialization.
fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to use 16-bit (mixed) precision training (through NVIDIA apex) instead of 32-bit training.
fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
For :obj:`fp16` training, apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details
on the `apex documentation <https://nvidia.github.io/apex/amp.html>`__.
local_rank (:obj:`int`, `optional`, defaults to -1):
During distributed training, the rank of the process.
tpu_num_cores (:obj:`int`, `optional`):
When training on TPU, the number of TPU cores (automatically passed by launcher script).
debug (:obj:`bool`, `optional`, defaults to :obj:`False`):
When training on TPU, whether to print debug metrics or not.
dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch size)
or not.
eval_steps (:obj:`int`, `optional`):
Number of update steps between two evaluations if :obj:`evaluation_strategy="steps"`. Will default to the
same value as :obj:`logging_steps` if not set.
dataloader_num_workers (:obj:`int`, `optional`, defaults to 0):
Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the
main process.
past_index (:obj:`int`, `optional`, defaults to -1):
Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc:`XLNet <../model_doc/xlnet>` can
make use of the past hidden states for their predictions. If this argument is set to a positive int, the
``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the model
at the next training step under the keyword argument ``mems``.
run_name (:obj:`str`, `optional`):
A descriptor for the run. Notably used for wandb logging.
disable_tqdm (:obj:`bool`, `optional`):
Whether or not to disable the tqdm progress bars. Will default to :obj:`True` if the logging level is set
to warn or lower (default), :obj:`False` otherwise.
remove_unused_columns (:obj:`bool`, `optional`, defaults to :obj:`True`):
If using `nlp.Dataset` datasets, whether or not to automatically remove the columns unused by the model
forward method.
(Note that this behavior is not implemented for :class:`~transformers.TFTrainer` yet.)
label_names (:obj:`List[str]`, `optional`):
The list of keys in your dictionary of inputs that correspond to the labels.
Will eventually default to :obj:`["labels"]` except if the model used is one of the
:obj:`XxxForQuestionAnswering` in which case it will default to :obj:`["start_positions",
"end_positions"]`.
load_best_model_at_end (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to load the best model found during training at the end of training.
.. note::
When set to :obj:`True`, the parameter :obj:`save_steps` will be ignored and the model will be saved
after each evaluation.
metric_for_best_model (:obj:`str`, `optional`):
Use in conjunction with :obj:`load_best_model_at_end` to specify the metric to use to compare two different
models. Must be the name of a metric returned by the evaluation with or without the prefix :obj:`"eval_"`.
Will default to :obj:`"loss"` if unspecified and :obj:`load_best_model_at_end=True` (to use the evaluation
loss).
If you set this value, :obj:`greater_is_better` will default to :obj:`True`. Don't forget to set it to
:obj:`False` if your metric is better when lower.
greater_is_better (:obj:`bool`, `optional`):
Use in conjunction with :obj:`load_best_model_at_end` and :obj:`metric_for_best_model` to specify if better
models should have a greater metric or not. Will default to:
- :obj:`True` if :obj:`metric_for_best_model` is set to a value that isn't :obj:`"loss"` or
:obj:`"eval_loss"`.
- :obj:`False` if :obj:`metric_for_best_model` is not set, or set to :obj:`"loss"` or :obj:`"eval_loss"`.
model_parallel (:obj:`bool`, `optional`, defaults to :obj:`False`):
If there are more than one devices, whether to use model parallelism to distribute the model's modules
across devices or not.
ignore_data_skip (:obj:`bool`, `optional`, defaults to :obj:`False`):
When resuming training, whether or not to skip the epochs and batches to get the data loading at the same
stage as in the previous training. If set to :obj:`True`, the training will begin faster (as that skipping
step can take a long time) but will not yield the same results as the interrupted training would have.
"""
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory."
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=None, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
model_parallel: bool = field(
default=False,
metadata={
"help": (
"If there are more than one devices, whether to use model parallelism to distribute the "
"model's modules across devices."
)
},
)
evaluation_strategy: EvaluationStrategy = field(
default="no",
metadata={"help": "Run evaluation during training at each logging step."},
)
prediction_loss_only: bool = field(
default=False,
metadata={"help": "When performing evaluation and predictions, only returns the loss."},
)
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
per_gpu_train_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_train_batch_size` is preferred. "
"Batch size per GPU/TPU core/CPU for training."
},
)
per_gpu_eval_batch_size: Optional[int] = field(
default=None,
metadata={
"help": "Deprecated, the use of `--per_device_eval_batch_size` is preferred."
"Batch size per GPU/TPU core/CPU for evaluation."
},
)
gradient_accumulation_steps: int = field(
default=1,
metadata={"help": "Number of updates steps to accumulate before performing a backward/update pass."},
)
eval_accumulation_steps: Optional[int] = field(
default=None,
metadata={"help": "Number of predictions steps to accumulate before moving the tensors to the CPU."},
)
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for Adam."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay if we apply some."})
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for Adam optimizer"})
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for Adam optimizer"})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for Adam optimizer."})
max_grad_norm: float = field(default=1.0, metadata={"help": "Max gradient norm."})
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
max_steps: int = field(
default=-1,
metadata={"help": "If > 0: set total number of training steps to perform. Override num_train_epochs."},
)
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={"help": "Tensorboard log dir."})
logging_first_step: bool = field(default=False, metadata={"help": "Log the first global_step"})
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
save_total_limit: Optional[int] = field(
default=None,
metadata={
"help": (
"Limit the total amount of checkpoints."
"Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints"
)
},
)
no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
seed: int = field(default=42, metadata={"help": "random seed for initialization"})
fp16: bool = field(
default=False,
metadata={"help": "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"},
)
fp16_opt_level: str = field(
default="O1",
metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
)
},
)
local_rank: int = field(default=-1, metadata={"help": "For distributed training: local_rank"})
tpu_num_cores: Optional[int] = field(
default=None, metadata={"help": "TPU: Number of TPU cores (automatically passed by launcher script)"}
)
tpu_metrics_debug: bool = field(
default=False,
metadata={"help": "Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics"},
)
debug: bool = field(default=False, metadata={"help": "Whether to print debug metrics on TPU"})
dataloader_drop_last: bool = field(
default=False, metadata={"help": "Drop the last incomplete batch if it is not divisible by the batch size."}
)
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
dataloader_num_workers: int = field(
default=0,
metadata={
"help": "Number of subprocesses to use for data loading (PyTorch only). 0 means that the data will be loaded in the main process."
},
)
past_index: int = field(
default=-1,
metadata={"help": "If >=0, uses the corresponding part of the output as the past state for next step."},
)
run_name: Optional[str] = field(
default=None, metadata={"help": "An optional descriptor for the run. Notably used for wandb logging."}
)
disable_tqdm: Optional[bool] = field(
default=None, metadata={"help": "Whether or not to disable the tqdm progress bars."}
)
remove_unused_columns: Optional[bool] = field(
default=True, metadata={"help": "Remove columns not required by the model when using an nlp.Dataset."}
)
label_names: Optional[List[str]] = field(
default=None, metadata={"help": "The list of keys in your dictionary of inputs that correspond to the labels."}
)
load_best_model_at_end: Optional[bool] = field(
default=False,
metadata={"help": "Whether or not to load the best model found during training at the end of training."},
)
metric_for_best_model: Optional[str] = field(
default=None, metadata={"help": "The metric to use to compare two different models."}
)
greater_is_better: Optional[bool] = field(
default=None, metadata={"help": "Whether the `metric_for_best_model` should be maximized or not."}
)
ignore_data_skip: bool = field(
default=False,
metadata={
"help": "When resuming training, whether or not to skip the first epochs and batches to get to the same training data."
},
)
def __post_init__(self):
if self.disable_tqdm is None:
self.disable_tqdm = logger.getEffectiveLevel() > logging.WARN
self.evaluation_strategy = EvaluationStrategy(self.evaluation_strategy)
if self.do_eval is False and self.evaluation_strategy != EvaluationStrategy.NO:
self.do_eval = True
if self.eval_steps is None:
self.eval_steps = self.logging_steps
if self.load_best_model_at_end and self.metric_for_best_model is None:
self.metric_for_best_model = "loss"
if self.greater_is_better is None and self.metric_for_best_model is not None:
self.greater_is_better = self.metric_for_best_model not in ["loss", "eval_loss"]
if self.run_name is None:
self.run_name = self.output_dir
if is_torch_available() and self.device.type != "cuda" and self.fp16:
raise ValueError("AMP (`--fp16`) can only be used on CUDA devices.")
@property
def train_batch_size(self) -> int:
"""
The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).
"""
if self.per_gpu_train_batch_size:
logger.warning(
"Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future "
"version. Using `--per_device_train_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
if not self.model_parallel:
train_batch_size = per_device_batch_size * max(1, self.n_gpu)
else:
train_batch_size = per_device_batch_size
return train_batch_size
@property
def eval_batch_size(self) -> int:
"""
The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).
"""
if self.per_gpu_eval_batch_size:
logger.warning(
"Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future "
"version. Using `--per_device_eval_batch_size` is preferred."
)
per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
if not self.model_parallel:
eval_batch_size = per_device_batch_size * max(1, self.n_gpu)
else:
eval_batch_size = per_device_batch_size
return eval_batch_size
@cached_property
@torch_required
def _setup_devices(self) -> Tuple["torch.device", int]:
logger.info("PyTorch: setting up devices")
if self.no_cuda:
device = torch.device("cpu")
n_gpu = 0
elif is_torch_tpu_available():
device = xm.xla_device()
n_gpu = 0
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
device = torch.device("cuda", self.local_rank)
n_gpu = 1
if device.type == "cuda":
torch.cuda.set_device(device)
return device, n_gpu
@property
@torch_required
def device(self) -> "torch.device":
"""
The device used by this process.
"""
return self._setup_devices[0]
@property
@torch_required
def n_gpu(self):
"""
The number of GPUs used by this process.
Note:
This will only be greater than one when you have multiple GPUs available but are not using distributed
training. For distributed training, it will always be 1.
"""
return self._setup_devices[1]
@property
@torch_required
def parallel_mode(self):
"""
The current mode used for parallelism if multiple GPUs/TPU cores are available. One of:
- :obj:`ParallelMode.NOT_PARALLEL`: no parallelism (CPU or one GPU).
- :obj:`ParallelMode.NOT_DISTRIBUTED`: several GPUs in one single process (uses :obj:`torch.nn.DataParallel`).
- :obj:`ParallelMode.DISTRIBUTED`: several GPUs, each having its own process (uses
:obj:`torch.nn.DistributedDataParallel`).
- :obj:`ParallelMode.TPU`: several TPU cores.
"""
if is_torch_tpu_available():
return ParallelMode.TPU
elif self.local_rank != -1:
return ParallelMode.DISTRIBUTED
elif self.n_gpu > 1:
return ParallelMode.NOT_DISTRIBUTED
else:
return ParallelMode.NOT_PARALLEL
def to_dict(self):
"""
Serializes this instance while replacing `Enum` members by their values (for JSON serialization support).
"""
d = dataclasses.asdict(self)
for k, v in d.items():
if isinstance(v, Enum):
d[k] = v.value
return d
def to_json_string(self):
"""
Serializes this instance to a JSON string.
"""
return json.dumps(self.to_dict(), indent=2)
def to_sanitized_dict(self) -> Dict[str, Any]:
"""
Sanitized serialization to use with TensorBoard’s hparams
"""
d = self.to_dict()
d = {**d, **{"train_batch_size": self.train_batch_size, "eval_batch_size": self.eval_batch_size}}
valid_types = [bool, int, float, str]
if is_torch_available():
valid_types.append(torch.Tensor)
return {k: v if type(v) in valid_types else str(v) for k, v in d.items()}
class ParallelMode(Enum):
NOT_PARALLEL = "not_parallel"
NOT_DISTRIBUTED = "not_distributed"
DISTRIBUTED = "distributed"
TPU = "tpu"
| [
"torch.device",
"torch.distributed.init_process_group",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.cuda.is_available"
] | 1.0 | hlahkar/transformers | c19d04623eacfbc2c452397a5eda0fde42db3fc5 |
1.4 | import Archi
import yaml
def test_model_loading():
try:
config = yaml.safe_load(
open("./esbn_model_test_config.yaml", 'r'),
)
except yaml.YAMLError as e:
print(e)
from Archi import load_model
model = load_model(config)
assert 'KeyValueMemory' in model.modules.keys()
assert 'key_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'value_memory' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'read_key_plus_conf' in model.stream_handler.placeholders['inputs']['KeyValueMemory'].keys()
assert 'CoreLSTM' in model.modules.keys()
assert 'CoreLSTM' in model.stream_handler.placeholders['inputs'].keys()
assert 'hidden' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
assert 'cell' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
assert 'iteration' in model.stream_handler.placeholders['inputs']['CoreLSTM'].keys()
def test_model_forward():
try:
config = yaml.safe_load(
open("./esbn_model_test_config.yaml", 'r'),
)
except yaml.YAMLError as e:
print(e)
from Archi import load_model
model = load_model(config)
import torch
inputs_dict = {
'x':torch.rand(4,3,64,64),
}
output = model(**inputs_dict)
assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0
output1 = model(**inputs_dict)
assert 'lstm_output' in output['modules']['CoreLSTM']
assert 'processed_input' in output['modules']['Encoder']
assert 'processed_input' in output['modules']['ToGateFCN']
assert output['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() == 0.0
assert output1['inputs']['KeyValueMemory']['read_key_plus_conf'][0].max() != 0.0
assert len(dict(model.named_parameters())) != 0
for np, p in model.named_parameters():
print(np)
if __name__ == '__main__':
test_model_loading()
test_model_forward()
| [
"torch.rand"
] | 1.4 | Near32/Archi | 0005713fa4e37c7cd9b34cd257c481d08928db8a |
1.0 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, Optional, Union
import numpy as np
from ..utils import is_torch_available, logging
from .audio_utils import ffmpeg_read
from .base import ChunkPipeline
if TYPE_CHECKING:
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
logger = logging.get_logger(__name__)
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_CTC_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
def rescale_stride(tokens_or_logits, stride, ratio):
"""
Rescales the stride values from audio space to tokens/logits space.
(160_000, 16_000, 16_000) -> (2000, 200, 200) for instance.
"""
# Shape is [B, SEQ] for tokens
# [B, SEQ, V] for logits
new_strides = []
for input_n, left, right in stride:
token_n = int(round(input_n * ratio))
left = int(round(left / input_n * token_n))
right = int(round(right / input_n * token_n))
new_stride = (token_n, left, right)
new_strides.append(new_stride)
return new_strides
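# Usage sketch (illustrative; not part of the original pipeline code): rescaling a
# single stride tuple from audio-sample space to logits space, assuming the model
# produced 2000 logit frames for 160_000 input samples. The first argument is not used
# by this helper, so a placeholder is passed.
def _rescale_stride_example():
    # Expected result: [(2000, 200, 200)]
    return rescale_stride(None, [(160_000, 16_000, 16_000)], 2000 / 160_000)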
def chunk_iter(inputs, feature_extractor, chunk_len, stride_left, stride_right):
inputs_len = inputs.shape[0]
step = chunk_len - stride_left - stride_right
for i in range(0, inputs_len, step):
# add start and end paddings to the chunk
chunk = inputs[i : i + chunk_len]
processed = feature_extractor(chunk, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")
_stride_left = 0 if i == 0 else stride_left
is_last = i + step + stride_left >= inputs_len
_stride_right = 0 if is_last else stride_right
if chunk.shape[0] > _stride_left:
yield {"is_last": is_last, "stride": (chunk.shape[0], _stride_left, _stride_right), **processed}
class AutomaticSpeechRecognitionPipeline(ChunkPipeline):
"""
Pipeline that aims at extracting spoken text contained within some audio.
The input can be either a raw waveform or an audio file. In the case of an audio file, ffmpeg should be installed
to support multiple audio formats.
Arguments:
model ([`PreTrainedModel`] or [`TFPreTrainedModel`]):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
[`PreTrainedModel`] for PyTorch and [`TFPreTrainedModel`] for TensorFlow.
tokenizer ([`PreTrainedTokenizer`]):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
[`PreTrainedTokenizer`].
feature_extractor ([`SequenceFeatureExtractor`]):
The feature extractor that will be used by the pipeline to encode waveform for the model.
chunk_length_s (`float`, *optional*, defaults to 0):
The input length for each chunk. If `chunk_length_s = 0` then chunking is disabled (default). Only
available for CTC models, e.g. [`Wav2Vec2ForCTC`].
<Tip>
For more information on how to effectively use `chunk_length_s`, please have a look at the [ASR chunking
blog post](https://huggingface.co/blog/asr-chunking).
</Tip>
stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`):
The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables
the model to *see* more context and infer letters better than without this context but the pipeline
discards the stride bits at the end to make the final reconstitution as perfect as possible.
<Tip>
For more information on how to effectively use `stride_length_s`, please have a look at the [ASR chunking
blog post](https://huggingface.co/blog/asr-chunking).
</Tip>
framework (`str`, *optional*):
The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
installed. If no framework is specified, will default to the one currently installed. If no framework is
specified and both frameworks are installed, will default to the framework of the `model`, or to PyTorch if
no model is provided.
device (`int`, *optional*, defaults to -1):
Device ordinal for CPU/GPU support. Setting this to -1 will run on CPU, while a non-negative value will run
the model on the associated CUDA device id.
decoder (`pyctcdecode.BeamSearchDecoderCTC`, *optional*):
[PyCTCDecode's
BeamSearchDecoderCTC](https://github.com/kensho-technologies/pyctcdecode/blob/2fd33dc37c4111417e08d89ccd23d28e9b308d19/pyctcdecode/decoder.py#L180)
can be passed for language model boosted decoding. See [`Wav2Vec2ProcessorWithLM`] for more information.
"""
def __init__(self, feature_extractor: Union["SequenceFeatureExtractor", str], *args, **kwargs):
super().__init__(*args, **kwargs)
self.feature_extractor = feature_extractor
if self.model.__class__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.values():
self.type = "seq2seq"
elif (
feature_extractor._processor_class
and feature_extractor._processor_class.endswith("WithLM")
and kwargs.get("decoder", None) is not None
):
self.decoder = kwargs["decoder"]
self.type = "ctc_with_lm"
else:
self.type = "ctc"
if self.framework == "tf":
raise ValueError("The AutomaticSpeechRecognitionPipeline is only available in PyTorch.")
self.check_model_type(dict(list(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.items()) + list(MODEL_FOR_CTC_MAPPING.items())))
def __call__(
self,
inputs: Union[np.ndarray, bytes, str],
**kwargs,
):
"""
Transcribe the audio sequence(s) given as inputs to text. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more
information.
Args:
inputs (`np.ndarray` or `bytes` or `str` or `dict`):
The inputs is either :
- `str` that is the filename of the audio file, the file will be read at the correct sampling rate
to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
- `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
same way.
- (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
Raw audio at the correct sampling rate (no further check will be done)
- `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "raw":
np.array}` with optionally a `"stride": (left: int, right: int)` that can ask the pipeline to
treat the first `left` samples and last `right` samples to be ignored in decoding (but used at
inference to provide more context to the model). Only use `stride` with CTC models.
return_timestamps (*optional*, `str`):
Only available for pure CTC models. If set to `"char"`, the pipeline will return `timestamps` along the
text for every character in the text. For instance if you get `[{"text": "h", "timestamps": (0.5, 0.6)},
{"text": "i", "timestamps": (0.7, 0.9)}]`, then it means the model predicts that the letter "h" was
pronounced after `0.5` and before `0.6` seconds. If set to `"word"`, the pipeline will return
`timestamps` along the text for every word in the text. For instance if you get `[{"text": "hi ",
"timestamps": (0.5, 0.9)}, {"text": "there", "timestamps": (1.0, 1.5)}]`, then it means the model
predicts that the word "hi" was pronounced after 0.5 and before 0.9 seconds.
Return:
`Dict`: A dictionary with the following keys:
- **text** (`str` ) -- The recognized text.
- **chunks** (*optional*, `List[Dict]`)
When using `return_timestamps`, the `chunks` will become a list containing all the various text
chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamps": (0.5, 0.9)}, {"text":
"there", "timestamps": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing
`"".join(chunk["text"] for chunk in output["chunks"])`.
"""
return super().__call__(inputs, **kwargs)
def _sanitize_parameters(self, **kwargs):
# No parameters on this pipeline right now
preprocess_params = {}
if "chunk_length_s" in kwargs:
preprocess_params["chunk_length_s"] = kwargs["chunk_length_s"]
if "stride_length_s" in kwargs:
preprocess_params["stride_length_s"] = kwargs["stride_length_s"]
postprocess_params = {}
if "decoder_kwargs" in kwargs:
postprocess_params["decoder_kwargs"] = kwargs["decoder_kwargs"]
if "return_timestamps" in kwargs:
postprocess_params["return_timestamps"] = kwargs["return_timestamps"]
return preprocess_params, {}, postprocess_params
def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None):
if isinstance(inputs, str):
with open(inputs, "rb") as f:
inputs = f.read()
if isinstance(inputs, bytes):
inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)
stride = None
extra = {}
if isinstance(inputs, dict):
stride = inputs.pop("stride", None)
_inputs = inputs.pop("raw")
in_sampling_rate = inputs.pop("sampling_rate")
extra = inputs
inputs = _inputs
if in_sampling_rate != self.feature_extractor.sampling_rate:
import torch
from torchaudio import functional as F
inputs = F.resample(
torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate
).numpy()
ratio = self.feature_extractor.sampling_rate / in_sampling_rate
else:
ratio = 1
if stride is not None:
if stride[0] + stride[1] > inputs.shape[0]:
raise ValueError("Stride is too large for input")
# Stride needs to get the chunk length here, it's going to get
# swallowed by the `feature_extractor` later, and then batching
# can add extra data in the inputs, so we need to keep track
# of the original length in the stride so we can cut properly.
stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio)))
if not isinstance(inputs, np.ndarray):
raise ValueError(f"We expect a numpy ndarray as input, got `{type(inputs)}`")
if len(inputs.shape) != 1:
raise ValueError("We expect a single channel audio input for AutomaticSpeechRecognitionPipeline")
if chunk_length_s:
if stride_length_s is None:
stride_length_s = chunk_length_s / 6
if isinstance(stride_length_s, (int, float)):
stride_length_s = [stride_length_s, stride_length_s]
# XXX: Careful, this variable will not exist in the `seq2seq` setting.
# Currently chunking is not possible at this level for `seq2seq` so
# it's ok.
align_to = self.model.config.inputs_to_logits_ratio
chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to)) * align_to
stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to)) * align_to
stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to)) * align_to
if self.type not in {"ctc", "ctc_with_lm"}:
raise ValueError(
"`chunk_length_s` is only valid for CTC models, use other chunking options for other models"
)
if chunk_len < stride_left + stride_right:
raise ValueError("Chunk length must be superior to stride length")
# make sure each chunk carries its stride info so `postprocess` can trim the overlapping regions
for item in chunk_iter(inputs, self.feature_extractor, chunk_len, stride_left, stride_right):
yield item
else:
processed = self.feature_extractor(
inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
)
if stride is not None:
if self.model.__class__ in MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.values():
raise ValueError("Stride is only usable with CTC models, try removing it")
processed["stride"] = stride
yield {"is_last": True, **processed, **extra}
def _forward(self, model_inputs):
is_last = model_inputs.pop("is_last")
if self.type == "seq2seq":
encoder = self.model.get_encoder()
# we need to pass `processed.get("attention_mask")` here since audio encoder
# attention mask length is different from expected text decoder `encoder_attention_mask` length
# `generate` magic to create the mask automatically won't work, we basically need to help
# it here.
# Consume values so we can let extra information flow freely through
# the pipeline (important for `partial` in microphone)
if "input_features" in model_inputs:
inputs = model_inputs.pop("input_features")
elif "input_values" in model_inputs:
inputs = model_inputs.pop("input_values")
else:
raise ValueError(
"Seq2Seq speech recognition model requires either a "
f"`input_features` or `input_values` key, but only has {model_inputs.keys()}"
)
attention_mask = model_inputs.pop("attention_mask", None)
tokens = self.model.generate(
encoder_outputs=encoder(inputs, attention_mask=attention_mask),
attention_mask=attention_mask,
)
out = {"tokens": tokens}
else:
stride = model_inputs.pop("stride", None)
input_values = model_inputs.pop("input_values")
attention_mask = model_inputs.pop("attention_mask", None)
outputs = self.model(input_values=input_values, attention_mask=attention_mask)
logits = outputs.logits
if self.type == "ctc_with_lm":
out = {"logits": logits}
else:
out = {"tokens": logits.argmax(dim=-1)}
if stride is not None:
# Send stride to `postprocess`.
# it needs to be handled there where
# the pieces are to be concatenated.
ratio = 1 / self.model.config.inputs_to_logits_ratio
if isinstance(stride, tuple):
out["stride"] = rescale_stride(logits, [stride], ratio)[0]
else:
out["stride"] = rescale_stride(logits, stride, ratio)
# Leftover
extra = model_inputs
return {"is_last": is_last, **out, **extra}
def postprocess(self, model_outputs, decoder_kwargs: Optional[Dict] = None, return_timestamps=None):
# Optional return types
optional = {}
if return_timestamps and self.type == "seq2seq":
raise ValueError("We cannot return_timestamps yet on non-ctc models !")
if return_timestamps == "char" and self.type == "ctc_with_lm":
raise ValueError("CTC with LM cannot return `char` timestamps, only `words`")
final_items = []
key = "logits" if self.type == "ctc_with_lm" else "tokens"
for outputs in model_outputs:
items = outputs[key].numpy()
stride = outputs.pop("stride", None)
if stride is not None:
total_n, left, right = stride
# Total_n might be < logits.shape[1]
# because of padding, that's why
# we need to reconstruct this information
# This won't work with left padding (which doesn't exist right now)
right_n = total_n - right
items = items[:, left:right_n]
final_items.append(items)
items = np.concatenate(final_items, axis=1)
items = items.squeeze(0)
if self.type == "ctc_with_lm":
if decoder_kwargs is None:
decoder_kwargs = {}
beams = self.decoder.decode_beams(items, **decoder_kwargs)
text = beams[0][0]
if return_timestamps:
# Simply cast from pyctcdecode format to wav2vec2 format to leverage
# pre-existing code later
chunk_offset = beams[0][2]
word_offsets = []
for word, (start_offset, end_offset) in chunk_offset:
word_offsets.append({"word": word, "start_offset": start_offset, "end_offset": end_offset})
else:
skip_special_tokens = self.type != "ctc"
text = self.tokenizer.decode(items, skip_special_tokens=skip_special_tokens)
if return_timestamps:
char_offsets = self.tokenizer.decode(
items, skip_special_tokens=skip_special_tokens, output_char_offsets=True
)["char_offsets"]
if return_timestamps == "word":
word_offsets = self.tokenizer._get_word_offsets(
char_offsets, self.tokenizer.replace_word_delimiter_char
)
if return_timestamps:
if return_timestamps == "word":
offsets = word_offsets
else:
offsets = char_offsets
chunks = []
for item in offsets:
start = item["start_offset"] * self.model.config.inputs_to_logits_ratio
start /= self.feature_extractor.sampling_rate
stop = item["end_offset"] * self.model.config.inputs_to_logits_ratio
stop /= self.feature_extractor.sampling_rate
chunks.append({"text": item[return_timestamps], "timestamp": (start, stop)})
optional["chunks"] = chunks
extra = defaultdict(list)
for output in model_outputs:
output.pop("tokens", None)
output.pop("logits", None)
output.pop("is_last", None)
for k, v in output.items():
extra[k].append(v)
return {"text": text, **optional, **extra}
| [
"torch.from_numpy"
] | 1.0 | techthiyanes/transformers | 705d65368fb28246534ef636fe62c008f4fb2682 |
1.0 | # Copyright 2019 PIQuIL - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import torch
from torch.nn import functional as F
from qucumber import _warn_on_missing_gpu
from qucumber.utils import cplx, unitaries
from qucumber.rbm import PurificationRBM
from .neural_state import NeuralStateBase
class DensityMatrix(NeuralStateBase):
r"""
:param num_visible: The number of visible units, i.e. the size of the system
:type num_visible: int
:param num_hidden: The number of units in the hidden layer
:type num_hidden: int
:param num_aux: The number of units in the purification layer
:type num_aux: int
:param unitary_dict: A dictionary associating bases with their unitary rotations
:type unitary_dict: dict[str, torch.Tensor]
:param gpu: Whether to perform computations on the default gpu.
:type gpu: bool
"""
_rbm_am = None
_rbm_ph = None
_device = None
def __init__(
self,
num_visible,
num_hidden=None,
num_aux=None,
unitary_dict=None,
gpu=False,
module=None,
):
if gpu and torch.cuda.is_available():
warnings.warn(
"Using DensityMatrix on GPU is not recommended due to poor performance compared to CPU.",
ResourceWarning,
2,
)
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
if module is None:
self.rbm_am = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)
self.rbm_ph = PurificationRBM(num_visible, num_hidden, num_aux, gpu=gpu)
else:
_warn_on_missing_gpu(gpu)
self.rbm_am = module.to(self.device)
self.rbm_am.device = self.device
self.rbm_ph = module.to(self.device).clone()
self.rbm_ph.device = self.device
self.num_visible = self.rbm_am.num_visible
self.num_hidden = self.rbm_am.num_hidden
self.num_aux = self.rbm_am.num_aux
self.device = self.rbm_am.device
self.unitary_dict = unitary_dict if unitary_dict else unitaries.create_dict()
self.unitary_dict = {
k: v.to(device=self.device) for k, v in self.unitary_dict.items()
}
@property
def networks(self):
return ["rbm_am", "rbm_ph"]
@property
def rbm_am(self):
return self._rbm_am
@rbm_am.setter
def rbm_am(self, new_val):
self._rbm_am = new_val
@property
def rbm_ph(self):
"""RBM used to learn the wavefunction phase."""
return self._rbm_ph
@rbm_ph.setter
def rbm_ph(self, new_val):
self._rbm_ph = new_val
@property
def device(self):
return self._device
@device.setter
def device(self, new_val):
self._device = new_val
def pi(self, v, vp, expand=True):
r"""Calculates elements of the :math:`\Pi` matrix.
If `expand` is `True`, will return a complex matrix
:math:`A_{ij} = \langle\sigma_i|\Pi|\sigma'_j\rangle`.
Otherwise will return a complex vector
:math:`A_{i} = \langle\sigma_i|\Pi|\sigma'_i\rangle`.
:param v: A batch of visible states, :math:`\sigma`.
:type v: torch.Tensor
:param vp: The other batch of visible states, :math:`\sigma'`.
:type vp: torch.Tensor
:param expand: Whether to return a matrix (`True`) or a vector (`False`).
:type expand: bool
:returns: The matrix elements given by :math:`\langle\sigma|\Pi|\sigma'\rangle`
:rtype: torch.Tensor
"""
m_am = F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias)
mp_am = F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias)
m_ph = F.linear(v, self.rbm_ph.weights_U)
mp_ph = F.linear(vp, self.rbm_ph.weights_U)
if expand and v.dim() >= 2:
m_am = m_am.unsqueeze_(1)
m_ph = m_ph.unsqueeze_(1)
if expand and vp.dim() >= 2:
mp_am = mp_am.unsqueeze_(0)
mp_ph = mp_ph.unsqueeze_(0)
exp_arg = (m_am + mp_am) / 2
phase = (m_ph - mp_ph) / 2
real = (
(1 + 2 * exp_arg.exp() * phase.cos() + (2 * exp_arg).exp())
.sqrt()
.log()
.sum(-1)
)
imag = torch.atan2(
(exp_arg.exp() * phase.sin()), (1 + exp_arg.exp() * phase.cos())
).sum(-1)
return cplx.make_complex(real, imag)
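# Note (added for clarity, not in the original source): with a = (m_am + mp_am) / 2 and
# phi = (m_ph - mp_ph) / 2 per auxiliary unit, `real` and `imag` above are
# log|1 + exp(a + i*phi)| and arg(1 + exp(a + i*phi)) summed over units, i.e. this method
# returns the complex logarithm of prod_k (1 + exp(a_k + i * phi_k)).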
def pi_grad(self, v, vp, phase=False, expand=False):
r"""Calculates the gradient of the :math:`\Pi` matrix with
respect to the amplitude RBM parameters for two input states
:param v: One of the visible states, :math:`\sigma`
:type v: torch.Tensor
:param vp: The other visible state, :math:`\sigma'`
:type vp: torch.Tensor
:param phase: Whether to compute the gradients for the phase RBM (`True`)
or the amplitude RBM (`False`)
:type phase: bool
:returns: The matrix element of the gradient given by
:math:`\langle\sigma|\nabla_\lambda\Pi|\sigma'\rangle`
:rtype: torch.Tensor
"""
unsqueezed = v.dim() < 2 or vp.dim() < 2
v = (v.unsqueeze(0) if v.dim() < 2 else v).to(self.rbm_am.weights_W)
vp = (vp.unsqueeze(0) if vp.dim() < 2 else vp).to(self.rbm_am.weights_W)
if expand:
arg_real = 0.5 * (
F.linear(v, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(1)
+ F.linear(vp, self.rbm_am.weights_U, self.rbm_am.aux_bias).unsqueeze_(
0
)
)
arg_imag = 0.5 * (
F.linear(v, self.rbm_ph.weights_U).unsqueeze_(1)
- F.linear(vp, self.rbm_ph.weights_U).unsqueeze_(0)
)
else:
arg_real = self.rbm_am.mixing_term(v + vp)
arg_imag = self.rbm_ph.mixing_term(v - vp)
sig = cplx.sigmoid(arg_real, arg_imag)
batch_sizes = (
(v.shape[0], vp.shape[0], *v.shape[1:-1]) if expand else (*v.shape[:-1],)
)
W_grad = torch.zeros_like(self.rbm_am.weights_W).expand(*batch_sizes, -1, -1)
vb_grad = torch.zeros_like(self.rbm_am.visible_bias).expand(*batch_sizes, -1)
hb_grad = torch.zeros_like(self.rbm_am.hidden_bias).expand(*batch_sizes, -1)
if phase:
temp = (v.unsqueeze(1) - vp.unsqueeze(0)) if expand else (v - vp)
sig = cplx.scalar_mult(sig, cplx.I)
ab_grad_real = torch.zeros_like(self.rbm_ph.aux_bias).expand(
*batch_sizes, -1
)
ab_grad_imag = ab_grad_real.clone()
else:
temp = (v.unsqueeze(1) + vp.unsqueeze(0)) if expand else (v + vp)
ab_grad_real = cplx.real(sig)
ab_grad_imag = cplx.imag(sig)
U_grad = 0.5 * torch.einsum("c...j,...k->c...jk", sig, temp)
U_grad_real = cplx.real(U_grad)
U_grad_imag = cplx.imag(U_grad)
vec_real = [
W_grad.view(*batch_sizes, -1),
U_grad_real.view(*batch_sizes, -1),
vb_grad,
hb_grad,
ab_grad_real,
]
vec_imag = [
W_grad.view(*batch_sizes, -1).clone(),
U_grad_imag.view(*batch_sizes, -1),
vb_grad.clone(),
hb_grad.clone(),
ab_grad_imag,
]
if unsqueezed and not expand:
vec_real = [grad.squeeze_(0) for grad in vec_real]
vec_imag = [grad.squeeze_(0) for grad in vec_imag]
return cplx.make_complex(
torch.cat(vec_real, dim=-1), torch.cat(vec_imag, dim=-1)
)
def rho(self, v, vp=None, expand=True):
r"""Computes the matrix elements of the (unnormalized) density matrix.
If `expand` is `True`, will return a complex matrix
:math:`A_{ij} = \langle\sigma_i|\widetilde{\rho}|\sigma'_j\rangle`.
Otherwise will return a complex vector
:math:`A_{i} = \langle\sigma_i|\widetilde{\rho}|\sigma'_i\rangle`.
:param v: One of the visible states, :math:`\sigma`.
:type v: torch.Tensor
:param vp: The other visible state, :math:`\sigma'`.
If `None`, will be set to `v`.
:type vp: torch.Tensor
:param expand: Whether to return a matrix (`True`) or a vector (`False`).
:type expand: bool
:returns: The elements of the current density matrix
:math:`\langle\sigma|\widetilde{\rho}|\sigma'\rangle`
:rtype: torch.Tensor
"""
if expand is False and vp is None:
return cplx.make_complex(self.probability(v))
elif vp is None:
vp = v
pi_ = self.pi(v, vp, expand=expand)
amp = (self.rbm_am.gamma(v, vp, eta=+1, expand=expand) + cplx.real(pi_)).exp()
phase = self.rbm_ph.gamma(v, vp, eta=-1, expand=expand) + cplx.imag(pi_)
return cplx.make_complex(amp * phase.cos(), amp * phase.sin())
def importance_sampling_numerator(self, vp, v):
return self.rho(vp, v, expand=False)
def importance_sampling_denominator(self, v):
return cplx.make_complex(self.probability(v))
def rotated_gradient(self, basis, sample):
r"""Computes the gradients rotated into the measurement basis
:param basis: The bases in which the measurement is made
:type basis: numpy.ndarray
:param sample: The measurement (either 0 or 1)
:type sample: torch.Tensor
:returns: A list of two tensors, representing the rotated gradients
of the amplitude and phase RBMs
:rtype: list[torch.Tensor, torch.Tensor]
"""
UrhoU, UrhoU_v, v = unitaries.rotate_rho_probs(
self, basis, sample, include_extras=True
)
inv_UrhoU = 1 / (UrhoU + 1e-8) # avoid dividing by zero
raw_grads = [self.am_grads(v), self.ph_grads(v)]
rotated_grad = [
-cplx.einsum("ijb,ijbg->bg", UrhoU_v, g, imag_part=False) for g in raw_grads
]
return [torch.einsum("b,bg->g", inv_UrhoU, g) for g in rotated_grad]
def am_grads(self, v):
r"""Computes the gradients of the amplitude RBM for given input states
:param v: The first input state, :math:`\sigma`
:type v: torch.Tensor
:returns: The gradients of all amplitude RBM parameters
:rtype: torch.Tensor
"""
return self.rbm_am.gamma_grad(v, v, eta=+1, expand=True) + self.pi_grad(
v, v, phase=False, expand=True
)
def ph_grads(self, v):
r"""Computes the gradients of the phase RBM for given input states
:param v: The first input state, :math:`\sigma`
:type v: torch.Tensor
:returns: The gradients of all phase RBM parameters
:rtype: torch.Tensor
"""
return cplx.scalar_mult( # need to multiply Gamma- by i
self.rbm_ph.gamma_grad(v, v, eta=-1, expand=True), cplx.I
) + self.pi_grad(v, v, phase=True, expand=True)
def fit(
self,
data,
epochs=100,
pos_batch_size=100,
neg_batch_size=None,
k=1,
lr=1,
input_bases=None,
progbar=False,
starting_epoch=1,
time=False,
callbacks=None,
optimizer=torch.optim.SGD,
optimizer_args=None,
scheduler=None,
scheduler_args=None,
**kwargs,
):
if input_bases is None:
raise ValueError("input_bases must be provided to train a DensityMatrix!")
else:
super().fit(
data=data,
epochs=epochs,
pos_batch_size=pos_batch_size,
neg_batch_size=neg_batch_size,
k=k,
lr=lr,
input_bases=input_bases,
progbar=progbar,
starting_epoch=starting_epoch,
time=time,
callbacks=callbacks,
optimizer=optimizer,
optimizer_args=optimizer_args,
scheduler=scheduler,
scheduler_args=scheduler_args,
**kwargs,
)
@staticmethod
def autoload(location, gpu=False):
state_dict = torch.load(location)
nn_state = DensityMatrix(
unitary_dict=state_dict["unitary_dict"],
num_visible=len(state_dict["rbm_am"]["visible_bias"]),
num_hidden=len(state_dict["rbm_am"]["hidden_bias"]),
num_aux=len(state_dict["rbm_am"]["aux_bias"]),
gpu=gpu,
)
nn_state.load(location)
return nn_state
| [
"torch.device",
"torch.cat",
"torch.einsum",
"torch.nn.functional.linear",
"torch.cuda.is_available",
"torch.load",
"torch.zeros_like"
] | 1.0 | ZvonimirBandic/QuCumber | 81f0291951e89346fd8ab5c35cc90341fd8acf35 |
1.8 | import torch
from torch import nn
from torch.nn import functional as F
from torchutils import to_device
class FocalLoss(nn.Module):
"""weighted version of Focal Loss"""
def __init__(self, alpha=.25, gamma=2, device=None):
super(FocalLoss, self).__init__()
self.alpha = torch.tensor([alpha, 1 - alpha])
# self.alpha = to_device(self.alpha, device=device)
self.gamma = gamma
def forward(self, inputs, targets):
BCE_loss = F.binary_cross_entropy(inputs, targets.float(), reduction='none')
targets = targets.long()
at = self.alpha.to(targets.device).gather(0, targets.view(-1))
pt = torch.exp(-BCE_loss)
F_loss = at * (1 - pt) ** self.gamma * BCE_loss
return F_loss.mean()
def binary_cross_entropy_weighted_focal_loss(y_pred, y_true, alpha=0.25, gamma=6, mask=None):
return FocalLoss(alpha=alpha, gamma=gamma, )(y_pred, y_true)
def cross_entropy_focal_loss(y_pred, y_true, weight=None, alpha=0.25, gamma=6, mask=None):
# important to add reduction='none' to keep per-batch-item loss
ce_loss = F.cross_entropy(y_pred, y_true, reduction='none', weight=weight)
pt = torch.exp(-ce_loss)
focal_loss = (alpha * (1 - pt) ** gamma * ce_loss).mean() # mean over the batch
return focal_loss
def binary_cross_entropy_focal_loss___(y_pred, y_true, alpha=0.25, gamma=6, mask=None):
# important to add reduction='none' to keep per-batch-item loss
ce_loss = F.binary_cross_entropy(y_pred, y_true, reduction='none')
pt = torch.exp(-ce_loss)
focal_loss = (alpha * (1 - pt) ** gamma * ce_loss).mean() # mean over the batch
return focal_loss
def bce_focal_loss(alpha=0.25, gamma=6):
def fn(y_pred, y_true, mask=None):
return binary_cross_entropy_focal_loss___(y_pred, y_true, alpha, gamma, mask=mask)
return fn
def ce_focal_loss(alpha=0.25, gamma=6):
def fn(y_pred, y_true, mask=None):
return cross_entropy_focal_loss(y_pred, y_true, alpha=alpha, gamma=gamma, mask=mask)
return fn
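# Hedged usage sketch (added for illustration; the tensors below are hypothetical):
if __name__ == "__main__":
    logits = torch.randn(8, 5)  # a batch of 8 samples over 5 classes
    labels = torch.randint(0, 5, (8,))
    loss_fn = ce_focal_loss(alpha=0.25, gamma=2)
    # the (1 - pt) ** gamma factor down-weights well-classified samples,
    # so hard examples dominate the mean loss
    print(loss_fn(logits, labels))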
| [
"torch.nn.functional.cross_entropy",
"torch.tensor",
"torch.exp",
"torch.nn.functional.binary_cross_entropy"
] | 1.8.1 | tchaye59/torchutils | ca7b01bf63b6c3adaa36a4a66dfd87e927ef2460 |
1.1 | from __future__ import print_function
import errno
import os
from PIL import Image
import torch
import torch.nn as nn
import re
import json
import pickle as cPickle
import numpy as np
import utils
import h5py
import operator
import functools
from torch._six import string_classes
import torch.nn.functional as F
import collections
#from pycocotools.coco import COCO
# from scipy.sparse import coo_matrix
# from sklearn.metrics.pairwise import cosine_similarity
from torch.utils.data.dataloader import default_collate
EPS = 1e-7
def assert_eq(real, expected):
assert real == expected, '%s (true) vs %s (expected)' % (real, expected)
def assert_array_eq(real, expected):
assert (np.abs(real-expected) < EPS).all(), \
'%s (true) vs %s (expected)' % (real, expected)
def load_folder(folder, suffix):
imgs = []
for f in sorted(os.listdir(folder)):
if f.endswith(suffix):
imgs.append(os.path.join(folder, f))
return imgs
def load_imageid(folder):
images = load_folder(folder, 'jpg')
img_ids = set()
for img in images:
img_id = int(img.split('/')[-1].split('.')[0].split('_')[-1])
img_ids.add(img_id)
return img_ids
def pil_loader(path):
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
def weights_init(m):
"""custom weights initialization."""
cname = m.__class__
if cname == nn.Linear or cname == nn.Conv2d or cname == nn.ConvTranspose2d:
m.weight.data.normal_(0.0, 0.02)
elif cname == nn.BatchNorm2d:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
else:
print('%s is not initialized.' % cname)
def init_net(net, net_file):
if net_file:
net.load_state_dict(torch.load(net_file))
else:
net.apply(weights_init)
def create_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
class Logger(object):
def __init__(self, output_name):
dirname = os.path.dirname(output_name)
if not os.path.exists(dirname):
os.mkdir(dirname)
self.log_file = open(output_name, 'w')
self.infos = {}
def append(self, key, val):
vals = self.infos.setdefault(key, [])
vals.append(val)
def log(self, extra_msg=''):
msgs = [extra_msg]
for key, vals in self.infos.items():
msgs.append('%s %.6f' % (key, np.mean(vals)))
msg = '\n'.join(msgs)
self.log_file.write(msg + '\n')
self.log_file.flush()
self.infos = {}
return msg
def write(self, msg):
self.log_file.write(msg + '\n')
self.log_file.flush()
print(msg)
def print_model(model, logger):
print(model)
nParams = 0
for w in model.parameters():
nParams += functools.reduce(operator.mul, w.size(), 1)
if logger:
logger.write('nParams=\t'+str(nParams))
def save_model(path, model, epoch, optimizer=None):
model_dict = {
'epoch': epoch,
'model_state': model.state_dict()
}
if optimizer is not None:
model_dict['optimizer_state'] = optimizer.state_dict()
torch.save(model_dict, path)
def rho_select(pad, lengths):
# Index of the last output for each sequence.
idx_ = (lengths-1).view(-1,1).expand(pad.size(0), pad.size(2)).unsqueeze(1)
extracted = pad.gather(1, idx_).squeeze(1)
return extracted
def trim_collate(batch):
"Puts each data field into a tensor with outer dimension batch size"
_use_shared_memory = True
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
out = None
if 1 < batch[0].dim(): # image features
max_num_boxes = max([x.size(0) for x in batch])
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = len(batch) * max_num_boxes * batch[0].size(-1)
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
# warning: F.pad returns Variable!
return torch.stack([F.pad(x, (0,0,0,max_num_boxes-x.size(0))).data for x in batch], 0, out=out)
else:
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == ():  # scalars: defer to torch's default_collate, which handles numpy scalar types
    return default_collate(batch)
elif isinstance(batch[0], int):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], collections.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [trim_collate(samples) for samples in transposed]
raise TypeError((error_msg.format(type(batch[0]))))
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
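# Hedged usage sketch (comments only; scipy is assumed to be available, mirroring the
# commented-out imports at the top of this file):
#
#     from scipy.sparse import coo_matrix
#     adj = coo_matrix(([1.0, 1.0], ([0, 1], [1, 0])), shape=(2, 2))
#     adj_t = sparse_mx_to_torch_sparse_tensor(adj)  # sparse FloatTensor of shape (2, 2)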
def mask_softmax(x, lengths): # , dim=1)
mask = torch.zeros_like(x).to(device=x.device, non_blocking=True)
t_lengths = lengths[:, :, None].expand_as(mask)
arange_id = torch.arange(mask.size(1)).to(device=x.device, non_blocking=True)
arange_id = arange_id[None, :, None].expand_as(mask)
mask[arange_id < t_lengths] = 1
# https://stackoverflow.com/questions/42599498/numercially-stable-softmax
# https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
# exp(x - max(x)) instead of exp(x) is a trick
# to improve the numerical stability while giving
# the same outputs
x2 = torch.exp(x - torch.max(x))
x3 = x2 * mask
epsilon = 1e-5
x3_sum = torch.sum(x3, dim=1, keepdim=True) + epsilon
x4 = x3 / x3_sum.expand_as(x3)
return x4
class GradReverseMask(torch.autograd.Function):
"""
This layer is used to create an adversarial loss.
"""
@staticmethod
def forward(ctx, x, mask, weight):
"""
The mask should be composed of 0 or 1.
The '1' will get their gradient reversed..
"""
ctx.save_for_backward(mask)
ctx.weight = weight
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
mask, = ctx.saved_tensors
mask_c = mask.clone().detach().float()
mask_c[mask == 0] = 1.0
mask_c[mask == 1] = - float(ctx.weight)
return grad_output * mask_c[:, None].float(), None, None
def grad_reverse_mask(x, mask, weight=1):
return GradReverseMask.apply(x, mask, weight)
class GradReverse(torch.autograd.Function):
"""
This layer is used to create an adversarial loss.
"""
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
return GradReverse.apply(x)
class GradMulConst(torch.autograd.Function):
"""
This layer scales the gradient by a constant factor during the backward pass.
"""
@staticmethod
def forward(ctx, x, const):
ctx.const = const
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output * ctx.const, None
def grad_mul_const(x, const):
return GradMulConst.apply(x, const)
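# Hedged usage note (added, not from the original file): these autograd functions are identity
# maps in the forward pass and only alter gradients in the backward pass, e.g.
#
#     reversed_feats = grad_reverse(features)       # gradient multiplied by -1
#     scaled_feats = grad_mul_const(features, 0.1)  # gradient multiplied by 0.1
#
# where `features` is any tensor that requires grad; this is the usual way to build an
# adversarial (gradient-reversal) branch on top of a shared backbone.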
| [
"torch.Size",
"torch.stack",
"torch.max",
"torch.is_tensor",
"torch.save",
"torch.sparse.FloatTensor",
"torch.from_numpy",
"torch.DoubleTensor",
"torch.LongTensor",
"torch.load",
"torch.zeros_like",
"torch.utils.data.dataloader.default_collate",
"torch.sum"
] | 1.1.0 | Zhiquan-Wen/D-VQA | 688c4dcc811f49b431daea81406e628ec71a7247 |
1.8 | #
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains logic that captures T5 HuggingFace models into ONNX models.
Inspired by https://github.com/onnx/models/blob/master/text/machine_comprehension/t5/dependencies/T5-export.py
"""
from itertools import islice
# tensorrt
import tensorrt as trt
# polygraphy
from polygraphy.backend.trt import Profile
# torch
import torch
from torch.nn import Module
# huggingface
from transformers.generation_utils import GenerationMixin
from transformers.modeling_outputs import Seq2SeqLMOutput
# TRT-HuggingFace
from T5.T5ModelConfig import T5ModelTRTConfig
from NNDF.tensorrt_utils import clamp_weights_onnx_to_fp16_bounds, move_t5_cast_op
from NNDF.networks import NetworkMetadata, Precision
from NNDF.logger import G_LOGGER
from NNDF.models import (
TRTEngineFile,
TorchModelFile,
ONNXModelFile,
ModelFileConverter,
)
def add_extra_fp32(network_definition):
"""
Force operations involved in layer norm to run in FP32 precision.
"""
pow_ops = {}
for layer_index, layer in enumerate(network_definition[1]):
if layer.type == trt.LayerType.IDENTITY:
all_fp32 = all([layer.output_type_is_set(o) and layer.get_output_type(o) == trt.float32 for o in range(layer.num_outputs)])
if all_fp32:
if layer.get_input(0).dtype == trt.float32:
layer.precision = trt.float32
if layer.type == trt.LayerType.ELEMENTWISE:
layer.__class__ = getattr(trt, "IElementWiseLayer")
if layer.op == trt.ElementWiseOperation.POW:
pow_ops[layer] = layer_index
layer.precision = trt.float32
layer.set_output_type(0, trt.float32)
for _, index in pow_ops.items():
# Iterate from few layers before pow to include residual add and cast op.
# Iterate till 10 layers after pow op to include all operations included in layer norm.
START_OFFSET = 4
END_OFFSET = 12
for i in range(index-START_OFFSET, index+END_OFFSET):
l = network_definition[1].get_layer(i)
if l.type == trt.LayerType.REDUCE:
l.precision = trt.float32
l.set_output_type(0, trt.float32)
if l.type == trt.LayerType.ELEMENTWISE:
l.__class__ = getattr(trt, "IElementWiseLayer")
if l.op == trt.ElementWiseOperation.SUM:
l.precision = trt.float32
l.set_output_type(0, trt.float32)
if l.type == trt.LayerType.UNARY:
l.__class__ = getattr(trt, "IUnaryLayer")
if l.op == trt.UnaryOperation.SQRT:
l.precision = trt.float32
l.set_output_type(0, trt.float32)
if l.type == trt.LayerType.ELEMENTWISE:
l.__class__ = getattr(trt, "IElementWiseLayer")
if l.op == trt.ElementWiseOperation.DIV:
l.precision = trt.float32
l.set_output_type(0, trt.float32)
if l.type == trt.LayerType.ELEMENTWISE:
l.__class__ = getattr(trt, "IElementWiseLayer")
if l.op == trt.ElementWiseOperation.PROD:
l.precision = trt.float32
l.set_output_type(0, trt.float32)
return network_definition
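# Clarifying note (added, not in the original source): the START/END offsets above are a
# heuristic window around each POW layer that brackets the layer-norm subgraph
# (residual add, pow, reduce-mean, add-epsilon, sqrt, div, scale); every REDUCE, SUM,
# SQRT, DIV and PROD layer found in that window is pinned to FP32 so the variance
# computation cannot overflow or underflow in FP16.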
# Torch File Encoding #
class T5DecoderTorchFile(TorchModelFile):
class TorchModule(Module, GenerationMixin):
"""
A simplified definition of the T5 decoder without support for loss.
Decoder with lm-head attached.
"""
def __init__(self, decoder, lm_head, config):
super().__init__()
self.decoder = decoder
self.lm_head = lm_head
self.config = config
def prepare_inputs_for_generation(self, input_ids, **kwargs):
return {
"input_ids": input_ids,
"encoder_hidden_states": kwargs["encoder_hidden_states"],
}
def forward(self, input_ids, encoder_hidden_states, **kwargs):
decoder_outputs = self.decoder(
input_ids=input_ids,
encoder_hidden_states=encoder_hidden_states,
**kwargs
)
# self.config.d_model ** -0.5 for rescaling output on vocab.
# as seen in https://huggingface.co/transformers/_modules/transformers/models/t5/modeling_t5.html#T5ForConditionalGeneration
sequence_output = decoder_outputs[0] * self.config.d_model ** -0.5
logits = self.lm_head(sequence_output)
if not kwargs.get("return_dict", False):
return (logits,) + decoder_outputs[1:]
return Seq2SeqLMOutput(logits=logits)
def __init__(self, model, network_metadata):
super().__init__(model, T5DecoderConverter, network_metadata)
class T5EncoderTorchFile(TorchModelFile):
"""Creation of a class to output only the last hidden state from the encoder."""
class TorchModule(Module, GenerationMixin):
def __init__(self, encoder):
super().__init__()
self.encoder = encoder
def forward(self, *input, **kwargs):
return self.encoder(*input, **kwargs)[0]
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def __init__(self, model, network_metadata):
super().__init__(model, T5EncoderConverter, network_metadata)
# ONNX File Encoding #
class T5EncoderONNXFile(ONNXModelFile):
def __init__(self, model, network_metadata):
super().__init__(model, T5EncoderConverter, network_metadata)
class T5DecoderONNXFile(ONNXModelFile):
def __init__(self, model, network_metadata):
super().__init__(model, T5DecoderConverter, network_metadata)
# TRT Engine File Encoding #
class T5DecoderTRTEngine(TRTEngineFile):
DEFAULT_TRT_WORKSPACE_MB = 3072
def __init__(self, model, network_metadata, batch_size = 1):
super().__init__(model, T5DecoderConverter, network_metadata, batch_size = batch_size)
def get_network_definition(self, network_definition):
return add_extra_fp32(network_definition)
def get_dynamic_shape_profiles(self):
max_sequence_length = T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[
self.network_metadata.variant
]
profile = Profile()
profile.add(
"input_ids",
min=(self.batch_size, 1),
opt=(self.batch_size, max_sequence_length // 2),
max=(self.batch_size, max_sequence_length),
)
profile.add(
"encoder_hidden_states",
min=(self.batch_size, 1, max_sequence_length),
opt=(self.batch_size, max_sequence_length // 2, max_sequence_length),
max=(self.batch_size, max_sequence_length, max_sequence_length),
)
return [profile]
def use_obey_precision_constraints(self):
return self.network_metadata.precision.fp16
class T5EncoderTRTEngine(TRTEngineFile):
DEFAULT_TRT_WORKSPACE_MB = 2048
def __init__(self, model, network_metadata, batch_size = 1):
super().__init__(model, T5EncoderConverter, network_metadata, batch_size = batch_size)
def get_network_definition(self, network_definition):
return add_extra_fp32(network_definition)
def get_dynamic_shape_profiles(self):
max_sequence_length = T5ModelTRTConfig.MAX_SEQUENCE_LENGTH[
self.network_metadata.variant
]
return [
Profile().add(
"input_ids",
min=(self.batch_size, 1),
opt=(self.batch_size, max_sequence_length // 2),
max=(self.batch_size, max_sequence_length),
)
]
def use_obey_precision_constraints(self):
return self.network_metadata.precision.fp16
# Converters #
class T5DecoderConverter(ModelFileConverter):
def __init__(self):
super().__init__(T5DecoderTorchFile, T5DecoderONNXFile, T5DecoderTRTEngine)
def torch_to_onnx(
self, output_fpath: str, model: Module, network_metadata: NetworkMetadata
):
"""
Exports a given huggingface T5 to decoder architecture only.
Inspired by https://github.com/onnx/models/blob/master/text/machine_comprehension/t5/dependencies/T5-export.py
Args:
output_prefix (str): Path to the onnx file
model (torch.Model): Model loaded torch class
Returns:
T5DecoderONNXFile: ONNX decoder object.
"""
input_ids = torch.tensor([[42] * 10])
# Exporting the decoder requires a basic instance of the encoder
# Create one temporarily
simplified_encoder = T5EncoderTorchFile.TorchModule(model.encoder)
# Exports to ONNX
decoder_with_lm_head = T5DecoderTorchFile.TorchModule(
model.decoder, model.lm_head, model.config
)
# This code allows for huggingface compatible torch class to use onnx exporter
old_forward = decoder_with_lm_head.forward
def _export_forward(*args, **kwargs):
result = old_forward(*args, **kwargs)
return result[0]
decoder_with_lm_head.forward = _export_forward
inputs = T5ModelTRTConfig.get_input_dims(network_metadata)["decoder"]
outputs = T5ModelTRTConfig.get_output_dims(network_metadata)["decoder"]
torch.onnx.export(
decoder_with_lm_head,
(input_ids, simplified_encoder(input_ids)),
output_fpath,
export_params=True,
opset_version=12,
input_names=inputs.get_names(),
output_names=outputs.get_names(),
dynamic_axes={
**inputs.get_torch_dynamic_axis_encoding(),
**outputs.get_torch_dynamic_axis_encoding(),
},
training=False,
use_external_data_format=True
)
if network_metadata.precision.fp16:
G_LOGGER.debug("Clamping FP16 weights for T5")
move_t5_cast_op(output_fpath, output_fpath)
clamp_weights_onnx_to_fp16_bounds(output_fpath, output_fpath)
return T5DecoderONNXFile(output_fpath, network_metadata)
class T5EncoderConverter(ModelFileConverter):
def __init__(self):
super().__init__(T5EncoderTorchFile, T5EncoderONNXFile, T5EncoderTRTEngine)
def onnx_to_trt(
self, output_fpath: str, input_fpath: str, network_metadata: NetworkMetadata, batch_size: int
):
"""
Override onnx_to_trt function from base.
Workaround: T5-base and T5-large are too large and cause FP16 to overflow. Encoder should not use FP16 tactics even in FP16 mode.
The end-to-end perf decreases by less than 10%, and the speedup from TRT over the framework baseline remains substantial.
"""
# Force encoder to FP32 only if variants are anything larger than small
# because of overflow and underflow issues
if network_metadata.precision.fp16 and network_metadata.variant != "t5-small":
network_metadata_cp_dct = network_metadata._asdict()
del network_metadata_cp_dct["precision"]
network_metadata = NetworkMetadata(**network_metadata_cp_dct, precision=Precision(fp16=False))
return super().onnx_to_trt(output_fpath, input_fpath, network_metadata, batch_size)
def torch_to_onnx(
self, output_fpath: str, model: Module, network_metadata: NetworkMetadata
):
"""
Exports a given huggingface T5 to encoder architecture only.
Inspired by https://github.com/onnx/models/blob/master/text/machine_comprehension/t5/dependencies/T5-export.py
Args:
output_prefix (str): Path to the onnx file
model (torch.Model): Model loaded torch class
Returns:
Tuple[str]: Names of generated models
"""
input_ids = torch.tensor([[42] * 10])
simplified_encoder = T5EncoderTorchFile.TorchModule(model.encoder)
inputs = T5ModelTRTConfig.get_input_dims(network_metadata)["encoder"]
outputs = T5ModelTRTConfig.get_output_dims(network_metadata)["encoder"]
# Exports to ONNX
torch.onnx._export(
simplified_encoder,
input_ids,
output_fpath,
export_params=True,
opset_version=12,
input_names=inputs.get_names(),
output_names=outputs.get_names(),
dynamic_axes={
**inputs.get_torch_dynamic_axis_encoding(),
**outputs.get_torch_dynamic_axis_encoding(),
},
training=False,
use_external_data_format=True
)
if network_metadata.precision.fp16:
G_LOGGER.debug("Clamping FP16 weights for T5")
move_t5_cast_op(output_fpath, output_fpath)
clamp_weights_onnx_to_fp16_bounds(output_fpath, output_fpath)
return T5EncoderONNXFile(output_fpath, network_metadata)
| [
"torch.tensor"
] | 1.8.1 | leo0519/TensorRT | 498dcb009fe4c2dedbe9c61044d3de4f3c04a41b |
0.3 | # pylint: disable=invalid-name
import glob
import os
import re
import time
import torch
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.training.trainer import Trainer, sparse_clip_norm, is_sparse
from allennlp.data import Vocabulary
from allennlp.common.params import Params
from allennlp.common.checks import ConfigurationError
from allennlp.models.simple_tagger import SimpleTagger
from allennlp.data.iterators import BasicIterator
from allennlp.data.dataset_readers import SequenceTaggingDatasetReader
class TestTrainer(AllenNlpTestCase):
def setUp(self):
super(TestTrainer, self).setUp()
self.instances = SequenceTaggingDatasetReader().read('tests/fixtures/data/sequence_tagging.tsv')
vocab = Vocabulary.from_instances(self.instances)
self.vocab = vocab
self.model_params = Params({
"text_field_embedder": {
"tokens": {
"type": "embedding",
"embedding_dim": 5
}
},
"encoder": {
"type": "lstm",
"input_size": 5,
"hidden_size": 7,
"num_layers": 2
}
})
self.model = SimpleTagger.from_params(self.vocab, self.model_params)
self.optimizer = torch.optim.SGD(self.model.parameters(), 0.01)
self.iterator = BasicIterator(batch_size=2)
self.iterator.index_with(vocab)
def test_trainer_can_run(self):
trainer = Trainer(model=self.model,
optimizer=self.optimizer,
iterator=self.iterator,
train_dataset=self.instances,
validation_dataset=self.instances,
num_epochs=2)
metrics = trainer.train()
assert 'best_validation_loss' in metrics
assert isinstance(metrics['best_validation_loss'], float)
assert 'best_epoch' in metrics
assert isinstance(metrics['best_epoch'], int)
# Making sure that both increasing and decreasing validation metrics work.
trainer = Trainer(model=self.model,
optimizer=self.optimizer,
iterator=self.iterator,
train_dataset=self.instances,
validation_dataset=self.instances,
validation_metric='+loss',
num_epochs=2)
metrics = trainer.train()
assert 'best_validation_loss' in metrics
assert isinstance(metrics['best_validation_loss'], float)
assert 'best_epoch' in metrics
assert isinstance(metrics['best_epoch'], int)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No CUDA device registered.")
def test_trainer_can_run_cuda(self):
trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances, num_epochs=2,
cuda_device=0)
trainer.train()
@pytest.mark.skipif(torch.cuda.device_count() < 2,
reason="Need multiple GPUs.")
def test_trainer_can_run_multiple_gpu(self):
multigpu_iterator = BasicIterator(batch_size=4)
multigpu_iterator.index_with(self.vocab)
trainer = Trainer(self.model, self.optimizer,
multigpu_iterator, self.instances, num_epochs=2,
cuda_device=[0, 1])
trainer.train()
def test_trainer_can_resume_training(self):
trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances,
validation_dataset=self.instances,
num_epochs=1, serialization_dir=self.TEST_DIR)
trainer.train()
new_trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances,
validation_dataset=self.instances,
num_epochs=3, serialization_dir=self.TEST_DIR)
epoch, val_metrics_per_epoch = new_trainer._restore_checkpoint() # pylint: disable=protected-access
assert epoch == 1
assert len(val_metrics_per_epoch) == 1
assert isinstance(val_metrics_per_epoch[0], float)
assert val_metrics_per_epoch[0] != 0.
new_trainer.train()
def test_should_stop_early_with_increasing_metric(self):
new_trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances,
validation_dataset=self.instances,
num_epochs=3, serialization_dir=self.TEST_DIR,
patience=5, validation_metric="+test")
assert new_trainer._should_stop_early([.5, .3, .2, .1, .4, .4]) # pylint: disable=protected-access
assert not new_trainer._should_stop_early([.3, .3, .3, .2, .5, .1]) # pylint: disable=protected-access
def test_should_stop_early_with_decreasing_metric(self):
new_trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances,
validation_dataset=self.instances,
num_epochs=3, serialization_dir=self.TEST_DIR,
patience=5, validation_metric="-test")
assert new_trainer._should_stop_early([.02, .3, .2, .1, .4, .4]) # pylint: disable=protected-access
assert not new_trainer._should_stop_early([.3, .3, .2, .1, .4, .5]) # pylint: disable=protected-access
def test_train_driver_raises_on_model_with_no_loss_key(self):
class FakeModel(torch.nn.Module):
def forward(self, **kwargs): # pylint: disable=arguments-differ,unused-argument
return {}
with pytest.raises(ConfigurationError):
trainer = Trainer(FakeModel(), self.optimizer,
self.iterator, self.instances,
num_epochs=2, serialization_dir=self.TEST_DIR)
trainer.train()
def test_trainer_can_log_histograms(self):
# enable activation logging
for module in self.model.modules():
module.should_log_activations = True
trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances, num_epochs=3,
serialization_dir=self.TEST_DIR,
histogram_interval=2)
trainer.train()
def test_trainer_respects_num_serialized_models_to_keep(self):
trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances, num_epochs=5,
serialization_dir=self.TEST_DIR,
num_serialized_models_to_keep=3)
trainer.train()
# Now check the serialized files
for prefix in ['model_state_epoch_*', 'training_state_epoch_*']:
file_names = glob.glob(os.path.join(self.TEST_DIR, prefix))
epochs = [int(re.search(r"_([0-9])\.th", fname).group(1))
for fname in file_names]
assert sorted(epochs) == [2, 3, 4]
def test_trainer_respects_keep_serialized_model_every_num_seconds(self):
# To test:
# Create an iterator that sleeps for 0.5 second per epoch, so the total training
# time for one epoch is slightly greater then 0.5 seconds.
# Run for 6 epochs, keeping the last 2 models, models also kept every 1 second.
# Check the resulting checkpoints. Should then have models at epochs
# 2, 4, plus the last two at 5 and 6.
class WaitingIterator(BasicIterator):
# pylint: disable=arguments-differ
def _create_batches(self, *args, **kwargs):
time.sleep(0.5)
return super(WaitingIterator, self)._create_batches(*args, **kwargs)
iterator = WaitingIterator(batch_size=2)
iterator.index_with(self.vocab)
trainer = Trainer(self.model, self.optimizer,
iterator, self.instances, num_epochs=6,
serialization_dir=self.TEST_DIR,
num_serialized_models_to_keep=2,
keep_serialized_model_every_num_seconds=1)
trainer.train()
# Now check the serialized files
for prefix in ['model_state_epoch_*', 'training_state_epoch_*']:
file_names = glob.glob(os.path.join(self.TEST_DIR, prefix))
epochs = [int(re.search(r"_([0-9])\.th", fname).group(1))
for fname in file_names]
# epoch N has N-1 in file name
assert sorted(epochs) == [1, 3, 4, 5]
def test_trainer_saves_models_at_specified_interval(self):
iterator = BasicIterator(batch_size=4)
iterator.index_with(self.vocab)
trainer = Trainer(self.model, self.optimizer,
iterator, self.instances, num_epochs=2,
serialization_dir=self.TEST_DIR,
model_save_interval=0.0001)
trainer.train()
# Now check the serialized files for models saved during the epoch.
prefix = 'model_state_epoch_*'
file_names = sorted(glob.glob(os.path.join(self.TEST_DIR, prefix)))
epochs = [re.search(r"_([0-9\.\-]+)\.th", fname).group(1)
for fname in file_names]
# We should have checkpoints at the end of each epoch and during each, e.g.
# [0.timestamp, 0, 1.timestamp, 1]
assert len(epochs) == 4
assert epochs[3] == '1'
assert '.' in epochs[0]
# Now make certain we can restore from timestamped checkpoint.
# To do so, remove the checkpoint from the end of epoch 1&2, so
# that we are forced to restore from the timestamped checkpoints.
for k in range(2):
os.remove(os.path.join(self.TEST_DIR, 'model_state_epoch_{}.th'.format(k)))
os.remove(os.path.join(self.TEST_DIR, 'training_state_epoch_{}.th'.format(k)))
os.remove(os.path.join(self.TEST_DIR, 'best.th'))
restore_trainer = Trainer(self.model, self.optimizer,
self.iterator, self.instances, num_epochs=2,
serialization_dir=self.TEST_DIR,
model_save_interval=0.0001)
epoch, _ = restore_trainer._restore_checkpoint() # pylint: disable=protected-access
assert epoch == 2
# One batch per epoch.
assert restore_trainer._batch_num_total == 2 # pylint: disable=protected-access
class TestSparseClipGrad(AllenNlpTestCase):
def test_sparse_clip_grad(self):
# create a sparse embedding layer, then take gradient
embedding = torch.nn.Embedding(100, 16, sparse=True)
embedding.zero_grad()
ids = torch.autograd.Variable((torch.rand(17) * 100).long())
# Set some of the ids to the same value so that the sparse gradient
# has repeated indices. This tests some additional logic.
ids[:5] = 5
loss = embedding(ids).sum()
loss.backward()
assert is_sparse(embedding.weight.grad)
# Now try to clip the gradients.
_ = sparse_clip_norm([embedding.weight], 1.5)
# Final norm should be 1.5
grad = embedding.weight.grad.data.coalesce()
self.assertAlmostEqual(grad._values().norm(2.0), 1.5, places=5) # pylint: disable=protected-access
| [
"torch.rand",
"torch.cuda.is_available",
"torch.nn.Embedding",
"torch.cuda.device_count"
] | 0.3.1 | vidurj/allennlp | 5b513d4f7c7365ac33b3cbc557506b46a9b50450 |
1.0 | import torchbearer
from torchbearer.callbacks import Callback
import torch
class WeightDecay(Callback):
"""Create a WeightDecay callback which uses the given norm on the given parameters and with the given decay rate.
If params is None (default) then the parameters will be retrieved from the model.
Example: ::
>>> from torchbearer import Trial
>>> from torchbearer.callbacks import WeightDecay
# Example Trial which runs a trial with weight decay on the model
>>> decay = WeightDecay()
>>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)
Args:
rate (float): The decay rate or lambda
p (int): The norm level
params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a
single Tensor that will have gradients normalized, otherwise this is retrieved from state
State Requirements:
- :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method
- :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented
"""
def __init__(self, rate=5e-4, p=2, params=None):
super(WeightDecay, self).__init__()
self.p = p
self.params = params
self.rate = rate
def on_start(self, state):
"""Retrieve params from state['model'] if required.
Args:
state (dict): The :class:`.Trial` state
"""
if self.params is None:
self.params = state[torchbearer.MODEL].parameters()
def on_criterion(self, state):
"""Calculate the decay term and add to state['loss'].
Args:
state (dict): The :class:`.Trial` state
"""
for param in self.params:
state[torchbearer.LOSS] += self.rate * torch.norm(param, self.p)
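# Added note: torch.norm(param, 2) is sqrt(sum(w ** 2)), i.e. an L2-norm penalty rather than
# the squared-norm penalty implied by optimizer-level weight decay, which is why the term is
# added to the loss here instead of being folded into the parameter update.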
class L1WeightDecay(WeightDecay):
"""WeightDecay callback which uses an L1 norm with the given rate and parameters. If params is None (default) then
the parameters will be retrieved from the model.
Example: ::
>>> from torchbearer import Trial
>>> from torchbearer.callbacks import L1WeightDecay
# Example Trial which runs a trial with weight decay on the model using an L1 norm
>>> decay = L1WeightDecay()
>>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)
Args:
rate (float): The decay rate or lambda
params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a
single Tensor that will have gradients normalized, otherwise this is retrieved from state
State Requirements:
- :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method
- :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented
"""
def __init__(self, rate=5e-4, params=None):
super(L1WeightDecay, self).__init__(rate=rate, p=1, params=params)
class L2WeightDecay(WeightDecay):
"""WeightDecay callback which uses an L2 norm with the given rate and parameters. If params is None (default) then
the parameters will be retrieved from the model.
Example: ::
>>> from torchbearer import Trial
>>> from torchbearer.callbacks import L2WeightDecay
# Example Trial which runs a trial with weight decay on the model using an L2 norm
>>> decay = L2WeightDecay()
>>> trial = Trial(None, callbacks=[decay], metrics=['loss'], verbose=2).for_steps(10).run(1)
Args:
rate (float): The decay rate or lambda
params (Iterable[Tensor] or Tensor, optional): an iterable of Tensors or a
single Tensor that will have gradients normalized, otherwise this is retrieved from state
State Requirements:
- :attr:`torchbearer.state.MODEL`: Model should have the `parameters` method
- :attr:`torchbearer.state.LOSS`: Loss should be a tensor that can be incremented
"""
def __init__(self, rate=5e-4, params=None):
super(L2WeightDecay, self).__init__(rate=rate, p=2, params=params)
| [
"torch.norm"
] | 1.0.0 | NunoEdgarGFlowHub/torchbearer | 940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0 |
1.0 | import torchbearer
from torchbearer import Callback
import torch
import torch.nn.functional as F
from torch.distributions import Beta
from torchbearer.bases import cite
bc = """
@inproceedings{tokozume2018between,
title={Between-class learning for image classification},
author={Tokozume, Yuji and Ushiku, Yoshitaka and Harada, Tatsuya},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages={5486--5494},
year={2018}
}
"""
@cite(bc)
class BCPlus(Callback):
"""BC+ callback which mixes images by treating them as waveforms. For standard BC, see :class:`.Mixup`.
    This callback can optionally convert labels to one hot before combining them according to the lambda parameters,
    which are sampled from a beta distribution; use alpha=1 to replicate the paper. Use with :meth:`BCPlus.bc_loss`,
    or set `mixup_loss = True` and use :meth:`.Mixup.mixup_loss`.
.. note::
This callback first sets all images to have zero mean. Consider adding an offset (e.g. 0.5) back before
visualising.
Example: ::
>>> from torchbearer import Trial
>>> from torchbearer.callbacks import BCPlus
# Example Trial which does BCPlus regularisation
>>> bcplus = BCPlus(classes=10)
>>> trial = Trial(None, criterion=BCPlus.bc_loss, callbacks=[bcplus], metrics=['acc'])
Args:
mixup_loss (bool): If True, the lambda and targets will be stored for use with the mixup loss function.
alpha (float): The alpha value for the beta distribution.
classes (int): The number of classes for conversion to one hot.
State Requirements:
- :attr:`torchbearer.state.X`: State should have the current data stored and correctly normalised
- :attr:`torchbearer.state.Y_TRUE`: State should have the current data stored
"""
def __init__(self, mixup_loss=False, alpha=1, classes=-1):
super(BCPlus, self).__init__()
self.mixup_loss = mixup_loss
self.classes = classes
self.dist = Beta(torch.tensor([float(alpha)]), torch.tensor([float(alpha)]))
@staticmethod
def bc_loss(state):
"""The KL divergence between the outputs of the model and the ratio labels. Model ouputs should be un-normalised
logits as this function performs a log_softmax.
Args:
state: The current :class:`Trial` state.
"""
prediction, target = state[torchbearer.Y_PRED], state[torchbearer.Y_TRUE]
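        # The target entropy term is computed over non-zero entries only, avoiding 0 * log(0) = nan.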
entropy = - (target[target.nonzero().split(1, dim=1)] * target[target.nonzero().split(1, dim=1)].log()).sum()
cross = - (target * F.log_softmax(prediction, dim=1)).sum()
return (cross - entropy) / prediction.size(0)
def _to_one_hot(self, target):
if target.dim() == 1:
target = target.unsqueeze(1)
one_hot = torch.zeros_like(target).repeat(1, self.classes)
one_hot.scatter_(1, target, 1)
return one_hot
return target.float()
def on_sample(self, state):
super(BCPlus, self).on_sample(state)
lam = self.dist.sample().to(state[torchbearer.DEVICE])
permutation = torch.randperm(state[torchbearer.X].size(0))
batch1 = state[torchbearer.X]
batch1 = batch1 - batch1.view(batch1.size(0), -1).mean(1, keepdim=True).view(*tuple([batch1.size(0)] + [1] * (batch1.dim() - 1)))
g1 = batch1.view(batch1.size(0), -1).std(1, keepdim=True).view(*tuple([batch1.size(0)] + [1] * (batch1.dim() - 1)))
batch2 = batch1[permutation]
g2 = g1[permutation]
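        # The per-example mixing ratio p is chosen so that the sampled coefficient lam holds after
        # accounting for the per-image standard deviations g1 and g2; dividing the mixture by
        # sqrt(p^2 + (1 - p)^2) keeps its standard deviation approximately constant (as in the BC+ paper).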
p = 1. / (1 + ((g1 / g2) * ((1 - lam) / lam)))
state[torchbearer.X] = (batch1 * p + batch2 * (1 - p)) / (p.pow(2) + (1 - p).pow(2)).sqrt()
if not self.mixup_loss:
target = self._to_one_hot(state[torchbearer.TARGET]).float()
state[torchbearer.Y_TRUE] = lam * target + (1 - lam) * target[permutation]
else:
state[torchbearer.MIXUP_LAMBDA] = lam
state[torchbearer.MIXUP_PERMUTATION] = permutation
state[torchbearer.Y_TRUE] = (state[torchbearer.Y_TRUE], state[torchbearer.Y_TRUE][state[torchbearer.MIXUP_PERMUTATION]])
def on_sample_validation(self, state):
super(BCPlus, self).on_sample_validation(state)
if not self.mixup_loss:
state[torchbearer.TARGET] = self._to_one_hot(state[torchbearer.TARGET]).float()
| [
"torch.zeros_like",
"torch.nn.functional.log_softmax"
] | 1.0.0 | NunoEdgarGFlowHub/torchbearer | 940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0 |
1.0 | import unittest
from mock import Mock, call
from torchbearer.metrics import RunningMean, Metric, RunningMetric, Mean, Std, Var
import torch
class TestVar(unittest.TestCase):
def test_variance_dim(self):
var = Var('test', dim=0)
var.process(torch.Tensor([[1., 2.], [3., 4.]]))
var.process(torch.Tensor([[4., 3.], [2., 1.]]))
var.process(torch.Tensor([[1., 1.], [1., 1.]]))
res = var.process_final()
self.assertTrue(len(res) == 2)
for m in res:
self.assertTrue(abs(m - 1.6000) < 0.0001)
class TestStd(unittest.TestCase):
def setUp(self):
self._metric = Metric('test')
self._metric.process = Mock()
self._metric.process.side_effect = [torch.zeros(torch.Size([])),
torch.FloatTensor([0.1, 0.2, 0.3]),
torch.FloatTensor([0.4, 0.5, 0.6]),
torch.FloatTensor([0.7, 0.8, 0.9]),
torch.ones(torch.Size([]))]
self._std = Std('test', unbiased=False)
self._std.reset({})
self._target = 0.31622776601684
def test_train(self):
self.setUp()
self._std.train()
for i in range(5):
self._std.process(self._metric.process())
result = self._std.process_final({})
self.assertAlmostEqual(self._target, result, places=5)
def test_validate(self):
self.setUp()
self._std.eval()
for i in range(5):
self._std.process(self._metric.process())
result = self._std.process_final({})
self.assertAlmostEqual(self._target, result, places=5)
def test_precision_error(self):
self.setUp()
self._std.train()
val = torch.tensor([0.55])
for i in range(2):
self._std.process(val)
result = self._std.process_final({})
self.assertEqual(0, result)
def setUpMoreDims(self):
self._metric = Metric('test')
self._metric.process = Mock()
self._metric.process.side_effect = [torch.zeros(torch.Size([])),
torch.FloatTensor([[0.1, 0.2, 0.3], [1.1, 1.2, 1.3]]),
torch.FloatTensor([[0.4, 0.5, 0.6], [1.4, 1.5, 1.6]]),
torch.FloatTensor([[0.7, 0.8, 0.9], [1.7, 1.8, 1.9]]),
torch.ones(torch.Size([]))]
self._std = Std('test', unbiased=False)
self._std.reset({})
self._target = 0.57662804083742
def test_more_dims(self):
self.setUpMoreDims()
for i in range(5):
self._std.process(self._metric.process())
result = self._std.process_final({})
self.assertAlmostEqual(self._target, result, places=5)
def test_std_dim(self):
std = Std('test', dim=0)
std.process(torch.Tensor([[1., 2.], [3., 4.]]))
std.process(torch.Tensor([[4., 3.], [2., 1.]]))
std.process(torch.Tensor([[1., 1.], [1., 1.]]))
res = std.process_final()
self.assertTrue(len(res) == 2)
for m in res:
self.assertTrue(abs(m - 1.2649) < 0.0001)
class TestMean(unittest.TestCase):
def setUp(self):
self._metric = Metric('test')
self._metric.process = Mock()
self._metric.process.side_effect = [torch.zeros(torch.Size([])),
torch.FloatTensor([0.1, 0.2, 0.3]),
torch.FloatTensor([0.4, 0.5, 0.6]),
torch.FloatTensor([0.7, 0.8, 0.9]),
torch.ones(torch.Size([]))]
self._mean = Mean('test')
self._mean.reset({})
self._target = 0.5
def test_train_dict(self):
self.setUp()
self._mean.train()
for i in range(5):
self._mean.process(self._metric.process())
result = self._mean.process_final({})
self.assertAlmostEqual(self._target, result, places=5)
def test_validate_dict(self):
self.setUp()
self._mean.eval()
for i in range(5):
self._mean.process(self._metric.process())
result = self._mean.process_final({})
self.assertAlmostEqual(self._target, result, places=5)
def setUpMoreDims(self):
self._metric = Metric('test')
self._metric.process = Mock()
self._metric.process.side_effect = [torch.zeros(torch.Size([])),
torch.FloatTensor([[0.1, 0.2, 0.3], [1.1, 1.2, 1.3]]),
torch.FloatTensor([[0.4, 0.5, 0.6], [1.4, 1.5, 1.6]]),
torch.FloatTensor([[0.7, 0.8, 0.9], [1.7, 1.8, 1.9]]),
torch.ones(torch.Size([]))]
self._mean = Mean('test')
self._mean.reset({})
self._target = 0.95
def test_more_dims(self):
self.setUpMoreDims()
for i in range(5):
self._mean.process(self._metric.process())
result = self._mean.process_final({})
self.assertAlmostEqual(self._target, result, places=5)
def test_mean_dim(self):
mean = Mean('test', dim=0)
mean.process(torch.Tensor([[1., 2.], [3., 4.]]))
mean.process(torch.Tensor([[4., 3.], [2., 1.]]))
mean.process(torch.Tensor([[1., 1.], [1., 1.]]))
res = mean.process_final()
self.assertTrue(len(res) == 2)
for m in res:
self.assertTrue(abs(m - 2.0) < 0.0001)
class TestRunningMetric(unittest.TestCase):
def setUp(self):
self._metric = RunningMetric('test', batch_size=5, step_size=5)
self._metric.reset({})
self._metric._process_train = Mock(return_value=3)
self._metric._step = Mock(return_value='output')
def test_train_called_with_state(self):
self._metric.train()
self._metric.process({'test': -1})
self._metric._process_train.assert_called_with({'test': -1})
def test_cache_one_step(self):
self._metric.train()
for i in range(6):
self._metric.process({})
self._metric._step.assert_has_calls([call([3]), call([3, 3, 3, 3, 3])])
def test_empty_methods(self):
metric = RunningMetric('test')
self.assertRaises(NotImplementedError, lambda: metric._step(['test']) is None)
self.assertRaises(NotImplementedError, lambda: metric._process_train(['test']) is None)
class TestRunningMean(unittest.TestCase):
def setUp(self):
self._metric = Metric('test')
self._mean = RunningMean('test')
self._cache = [torch.Tensor([1.0]), torch.Tensor([1.5]), torch.Tensor([2.0])]
self._target = 1.5
def test_train(self):
result = self._mean._process_train(torch.FloatTensor([1.0, 1.5, 2.0]))
self.assertAlmostEqual(self._target, result, 3, 0.002)
def test_step(self):
result = self._mean._step(self._cache)
self.assertEqual(self._target, result)
def test_dims(self):
mean = RunningMean('test', dim=0)
cache = [mean._process_train(torch.Tensor([[1., 2.], [3., 4.]])),
mean._process_train(torch.Tensor([[4., 3.], [2., 1.]])),
mean._process_train(torch.Tensor([[1., 1.], [1., 1.]]))]
res = mean._step(cache)
self.assertTrue(len(res) == 2)
for m in res:
self.assertTrue(abs(m - 2.0) < 0.0001)
| [
"torch.FloatTensor",
"torch.Tensor",
"torch.tensor",
"torch.Size"
] | 1.0.0 | NunoEdgarGFlowHub/torchbearer | 940e75ec88acd59d5a97aa8c721f7cfa30a5c4d0 |
1.3 | """Test torch algo utility functions."""
import numpy as np
import pytest
import tensorflow as tf
import torch
import torch.nn.functional as F
import metarl.tf.misc.tensor_utils as tf_utils
import metarl.torch.algos._utils as torch_algo_utils
from tests.fixtures import TfGraphTestCase
def stack(d, arr):
"""Stack 'arr' 'd' times."""
return np.repeat(np.expand_dims(arr, axis=0), repeats=d, axis=0)
ONES = np.ones((4, 6))
ZEROS = np.zeros((4, 6))
ARRANGE = stack(4, np.arange(6))
PI_DIGITS = stack(4, [3, 1, 4, 1, 5, 9])
E_DIGITS = stack(4, [2, 7, 1, 8, 2, 8])
FIBS = stack(4, [1, 1, 2, 3, 5, 8])
nums_1d = np.arange(0, 4).astype(float)
nums_2d = np.arange(0, 4).astype(float).reshape(2, 2)
nums_3d = np.arange(0, 8).astype(float).reshape(2, 2, 2)
class TestTorchAlgoUtils(TfGraphTestCase):
"""Test class for torch algo utility functions."""
# yapf: disable
@pytest.mark.parametrize('gae_lambda, rewards_val, baselines_val', [
(0.4, ONES, ZEROS),
(0.8, PI_DIGITS, ARRANGE),
(1.2, ONES, FIBS),
(1.7, E_DIGITS, PI_DIGITS),
])
# yapf: enable
def testcompute_advantages(self, gae_lambda, rewards_val, baselines_val):
"""Test compute_advantage function."""
discount = 0.99
max_len = rewards_val.shape[-1]
torch_advs = torch_algo_utils.compute_advantages(
discount, gae_lambda, max_len, torch.Tensor(baselines_val),
torch.Tensor(rewards_val))
rewards = tf.compat.v1.placeholder(dtype=tf.float32,
name='reward',
shape=[None, None])
baselines = tf.compat.v1.placeholder(dtype=tf.float32,
name='baseline',
shape=[None, None])
adv = tf_utils.compute_advantages(discount, gae_lambda, max_len,
baselines, rewards)
tf_advs = self.sess.run(adv,
feed_dict={
rewards: rewards_val,
baselines: baselines_val,
})
assert np.allclose(torch_advs.numpy(),
tf_advs.reshape(torch_advs.shape),
atol=1e-5)
def test_add_padding_last_1d(self):
"""Test pad_to_last function for 1d."""
max_length = 10
expected = F.pad(torch.Tensor(nums_1d),
(0, max_length - nums_1d.shape[-1]))
tensor_padding = torch_algo_utils.pad_to_last(nums_1d,
total_length=max_length)
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_1d,
total_length=10,
axis=0)
assert expected.eq(tensor_padding).all()
def test_add_padding_last_2d(self):
"""Test pad_to_last function for 2d."""
max_length = 10
tensor_padding = torch_algo_utils.pad_to_last(nums_2d, total_length=10)
expected = F.pad(torch.Tensor(nums_2d),
(0, max_length - nums_2d.shape[-1]))
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_2d,
total_length=10,
axis=0)
expected = F.pad(torch.Tensor(nums_2d),
(0, 0, 0, max_length - nums_2d.shape[0]))
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_2d,
total_length=10,
axis=1)
expected = F.pad(torch.Tensor(nums_2d),
(0, max_length - nums_2d.shape[-1], 0, 0))
assert expected.eq(tensor_padding).all()
def test_add_padding_last_3d(self):
"""Test pad_to_last function for 3d."""
max_length = 10
tensor_padding = torch_algo_utils.pad_to_last(nums_3d, total_length=10)
expected = F.pad(torch.Tensor(nums_3d),
(0, max_length - nums_3d.shape[-1], 0, 0, 0, 0))
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_3d,
total_length=10,
axis=0)
expected = F.pad(torch.Tensor(nums_3d),
(0, 0, 0, 0, 0, max_length - nums_3d.shape[0]))
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_3d,
total_length=10,
axis=1)
expected = F.pad(torch.Tensor(nums_3d),
(0, 0, 0, max_length - nums_3d.shape[-1], 0, 0))
assert expected.eq(tensor_padding).all()
tensor_padding = torch_algo_utils.pad_to_last(nums_3d,
total_length=10,
axis=2)
expected = F.pad(torch.Tensor(nums_3d),
(0, max_length - nums_3d.shape[-1], 0, 0, 0, 0))
assert expected.eq(tensor_padding).all()
@pytest.mark.parametrize('nums', [nums_1d, nums_2d, nums_3d])
def test_out_of_index_error(self, nums):
"""Test pad_to_last raises IndexError."""
with pytest.raises(IndexError):
torch_algo_utils.pad_to_last(nums,
total_length=10,
axis=len(nums.shape))
def testmake_optimizer_with_type(self):
"""Test make_optimizer function with type as first argument."""
optimizer_type = torch.optim.Adam
module = torch.nn.Linear(2, 1)
lr = 0.123
optimizer = torch_algo_utils.make_optimizer(optimizer_type,
module,
lr=lr)
assert isinstance(optimizer, optimizer_type)
assert optimizer.defaults['lr'] == lr
def testmake_optimizer_with_tuple(self):
"""Test make_optimizer function with tuple as first argument."""
optimizer_type = (torch.optim.Adam, {'lr': 0.1})
module = torch.nn.Linear(2, 1)
optimizer = torch_algo_utils.make_optimizer(optimizer_type, module)
        assert isinstance(optimizer, optimizer_type[0])
assert optimizer.defaults['lr'] == optimizer_type[1]['lr']
def testmake_optimizer_raise_value_error(self):
"""Test make_optimizer raises value error."""
optimizer_type = (torch.optim.Adam, {'lr': 0.1})
module = torch.nn.Linear(2, 1)
with pytest.raises(ValueError):
_ = torch_algo_utils.make_optimizer(optimizer_type,
module,
lr=0.123)
| [
"torch.nn.Linear",
"torch.Tensor"
] | 1.3.0 | icml2020submission6857/metarl | 9b66cefa2b6bcb6a38096d629ce8853b47c7171d |
1.3 | """GaussianMLPModule."""
import abc
import numpy as np
import torch
from torch import nn
from metarl.torch.distributions import TanhNormal
from metarl.torch.modules.mlp_module import MLPModule
from metarl.torch.modules.multi_headed_mlp_module import MultiHeadedMLPModule
class TanhGaussianMLPBaseModule2(nn.Module):
"""
    Base module for tanh-squashed Gaussian MLP policies (TanhGaussianMLPBaseModule2).
Args:
input_dim (int): Input dimension of the model.
output_dim (int): Output dimension of the model.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a torch.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
learn_std (bool): Is std trainable.
init_std (float): Initial value for std.
(plain value - not log or exponentiated).
adaptive_std (bool): Is std a neural network. If False, it will be a
parameter.
std_share_network (bool): Boolean for whether mean and std share
the same network.
std_hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for std. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
min_std (float): If not None, the std is at least the value of min_std,
to avoid numerical issues (plain value - not log or exponentiated).
max_std (float): If not None, the std is at most the value of max_std,
to avoid numerical issues (plain value - not log or exponentiated).
std_hidden_nonlinearity: Nonlinearity for each hidden layer in
the std network.
std_output_w_init (callable): Initializer function for the weight
of output dense layer(s) in the std network.
        std_parameterization (str): How the std should be parameterized. There
            are two options:
            - exp: the logarithm of the std will be stored, and an exponential
                transformation will be applied to recover the std
- softplus: the std will be computed as log(1+exp(x))
layer_normalization (bool): Bool for using layer normalization or not.
"""
def __init__(self,
input_dim,
output_dim,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=None,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=1e-6,
max_std=None,
std_hidden_sizes=(32, 32),
std_hidden_nonlinearity=nn.ReLU,
std_hidden_w_init=nn.init.xavier_uniform_,
std_hidden_b_init=nn.init.zeros_,
std_output_w_init=nn.init.xavier_uniform_,
std_parameterization='exp',
layer_normalization=False):
super().__init__()
self._input_dim = input_dim
self._hidden_sizes = hidden_sizes
self._action_dim = output_dim
self._learn_std = learn_std
self._std_hidden_sizes = std_hidden_sizes
self._min_std = min_std
self._max_std = max_std
self._std_hidden_nonlinearity = std_hidden_nonlinearity
self._std_hidden_w_init = std_hidden_w_init
self._std_hidden_b_init = std_hidden_b_init
self._std_output_nonlinearity = torch.tanh
self._std_output_w_init = std_output_w_init
self._std_parameterization = std_parameterization
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._layer_normalization = layer_normalization
if self._std_parameterization not in ('exp', 'softplus'):
raise NotImplementedError
init_std_param = torch.Tensor([init_std]).log()
if self._learn_std:
self._init_std = torch.nn.Parameter(init_std_param)
else:
self._init_std = init_std_param
self.register_buffer('init_std', self._init_std)
self._min_std_param = self._max_std_param = None
if min_std is not None:
self._min_std_param = torch.Tensor([min_std]).log()
self.register_buffer('min_std_param', self._min_std_param)
if max_std is not None:
self._max_std_param = torch.Tensor([max_std]).log()
self.register_buffer('max_std_param', self._max_std_param)
def to(self, *args, **kwargs):
super().to(*args, **kwargs)
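        # Re-bind the std-related attributes to the (possibly relocated) buffers after the move.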
buffers = dict(self.named_buffers())
if not isinstance(self._init_std, torch.nn.Parameter):
self._init_std = buffers['init_std']
self._min_std_param = buffers['min_std_param']
self._max_std_param = buffers['max_std_param']
@abc.abstractmethod
def _get_mean_and_log_std(self, inputs):
pass
def forward(self, inputs):
"""Forward method."""
mean, log_std_uncentered = self._get_mean_and_log_std(inputs)
# requires that std nonlinearity is tanh
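        # Rescale the tanh-squashed value from [-1, 1] into [log(min_std), log(max_std)].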
log_std_uncentered = self._min_std_param + 0.5 * (self._max_std_param - self._min_std_param) * (log_std_uncentered + 1.)
if self._std_parameterization == 'exp':
std = log_std_uncentered.exp()
else:
std = log_std_uncentered.exp().exp().add(1.).log()
dist = TanhNormal(mean, std)
return dist
def _to_scalar_if_not_none(self, tensor):
return None if tensor is None else tensor.item()
class TanhGaussianMLPTwoHeadedModule2(TanhGaussianMLPBaseModule2):
"""GaussianMLPModule which has only one mean network."""
def __init__(self,
input_dim,
output_dim,
hidden_sizes=(32, 32),
hidden_nonlinearity=nn.ReLU,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
output_nonlinearity=nn.ReLU,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
learn_std=True,
init_std=1.0,
min_std=np.exp(-20.),
max_std=np.exp(2.),
std_parameterization='exp',
layer_normalization=False):
super(TanhGaussianMLPTwoHeadedModule2,
self).__init__(input_dim=input_dim,
output_dim=output_dim,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
learn_std=learn_std,
init_std=init_std,
min_std=min_std,
max_std=max_std,
std_parameterization=std_parameterization,
layer_normalization=layer_normalization)
self._shared_mean_log_std_network = MultiHeadedMLPModule(
n_heads=2,
input_dim=self._input_dim,
output_dims=self._action_dim,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearities=[None, nn.Tanh],
output_w_inits=self._output_w_init,
output_b_inits=[
nn.init.zeros_,
lambda x: nn.init.constant_(x, self._init_std.item())
],
layer_normalization=self._layer_normalization)
def _get_mean_and_log_std(self, inputs):
return self._shared_mean_log_std_network(inputs)
| [
"torch.Tensor",
"torch.nn.Parameter"
] | 1.3.0 | icml2020submission6857/metarl | 9b66cefa2b6bcb6a38096d629ce8853b47c7171d |
1.4 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Transformer Agents.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from parlai.core.agents import Agent
from parlai.utils.torch import padded_3d
from parlai.core.torch_classifier_agent import TorchClassifierAgent
from parlai.core.torch_ranker_agent import TorchRankerAgent
from parlai.core.torch_generator_agent import TorchGeneratorAgent
from parlai.utils.misc import recursive_getattr
from parlai.utils.logging import logging
from .modules import (
TransformerMemNetModel,
TransformerGeneratorModel,
TransformerLinearWrapper,
MixerModel,
MixerGeneratorModel,
)
import torch
def add_common_cmdline_args(parser):
"""
Add common command line args.
"""
parser.add_argument(
'-esz',
'--embedding-size',
type=int,
default=300,
help='Size of all embedding layers. Must be a multiple of --n-heads.',
)
parser.add_argument(
'-nl', '--n-layers', type=int, default=2, help='Number of transformer layers.'
)
parser.add_argument(
'-hid',
'--ffn-size',
type=int,
default=300,
help='Hidden size of the FFN layers',
)
parser.add_argument(
'--dropout',
type=float,
default=0.0,
help='Dropout used around embeddings and before layer layer normalizations. '
'This is used in Vaswani 2017 and works well on large datasets.',
)
parser.add_argument(
'--attention-dropout',
type=float,
default=0.0,
help='Dropout used after attention softmax. This is not used in Vaswani 2017.',
)
parser.add_argument(
'--relu-dropout',
type=float,
default=0.0,
help='Dropout used after the ReLU in the FFN. Not used in Vaswani 2017, '
'but used in Tensor2Tensor.',
)
parser.add_argument(
'--n-heads', type=int, default=2, help='Number of multihead attention heads'
)
parser.add_argument(
'--learn-positional-embeddings',
type='bool',
default=False,
help='If off, sinusoidal embeddings are used. If on, position embeddings are '
'learned from scratch.',
)
parser.add_argument('--embeddings-scale', type='bool', default=True)
parser.add_argument(
'--n-positions',
type=int,
default=None,
hidden=True,
help='Number of positional embeddings to learn. Defaults '
'to truncate or 1024 if not provided.',
)
parser.add_argument(
'--n-segments',
type=int,
default=0,
help='The number of segments that support the model. '
'If zero no segment and no langs_embedding.',
)
parser.add_argument(
'--variant',
choices={'aiayn', 'xlm', 'prelayernorm', 'bart'},
default='aiayn',
help='Chooses locations of layer norms, etc. prelayernorm '
'is used to match some fairseq models',
recommended='xlm',
)
parser.add_argument(
'--activation',
choices={'relu', 'gelu'},
default='relu',
help='Nonlinear activation to use. AIAYN uses relu, but '
'more recent papers prefer gelu.',
recommended='gelu',
)
parser.add_argument(
'--output-scaling',
type=float,
default=1.0,
help='scale the output of every transformer by this quantity.',
)
parser.add_argument(
'--share-word-embeddings',
type='bool',
default=True,
help='Share word embeddings table for candidate and context'
'in the memory network',
)
parser.add_argument(
'-nel',
'--n-encoder-layers',
type=int,
default=-1,
        help='This will override the n-layers for asymmetrical transformers',
)
parser.add_argument(
'-ndl',
'--n-decoder-layers',
type=int,
default=-1,
        help='This will override the n-layers for asymmetrical transformers',
)
parser.add_argument(
'--model-parallel',
type='bool',
default=False,
help='Shard the layers across multiple GPUs.',
)
class Transformer(Agent):
"""
Placeholder Transformer Agent.
Placeholder class, which just throws an error telling the user to specify whether
they want the ranker or the generator.
"""
def __init__(self, opt, shared=None):
raise RuntimeError(
"`--model transformer` is not a valid choice. Please select either "
"`--model transformer/ranker` or `--model transformer/generator"
)
class TransformerRankerAgent(TorchRankerAgent):
"""
Transformer Ranker Agent.
Implementation of a TorchRankerAgent, where the model is a Transformer
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Add command-line arguments specifically for this agent.
"""
super().add_cmdline_args(parser, partial_opt=partial_opt)
agent = parser.add_argument_group('Transformer Arguments')
add_common_cmdline_args(agent)
# memory and knowledge arguments
agent.add_argument(
'--use-memories',
type='bool',
default=False,
help='use memories: must implement the function '
'`_vectorize_memories` to use this',
)
agent.add_argument(
'--wrap-memory-encoder',
type='bool',
default=False,
help='wrap memory encoder with MLP',
)
agent.add_argument(
'--memory-attention',
type=str,
default='sqrt',
choices=['cosine', 'dot', 'sqrt'],
help='similarity for basic attention mechanism '
'when using transformer to encode memories',
)
# model specific arguments
agent.add_argument('--normalize-sent-emb', type='bool', default=False)
agent.add_argument('--share-encoders', type='bool', default=True)
parser.add_argument(
'--share-word-embeddings',
type='bool',
default=True,
help='Share word embeddings table for candidate and context'
'in the memory network',
)
agent.add_argument(
'--learn-embeddings', type='bool', default=True, help='learn embeddings'
)
agent.add_argument(
'--data-parallel',
type='bool',
default=False,
help='use model in data parallel, requires ' 'multiple gpus',
)
agent.add_argument(
'--reduction-type',
type=str,
default='mean',
choices=['first', 'max', 'mean'],
help='Type of reduction at the end of transformer',
)
parser.set_defaults(learningrate=0.0001, optimizer='adamax', truncate=1024)
cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt)
return agent
def _score(self, output, cands):
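        # cands with 2 dims is a single candidate matrix shared across the batch;
        # with 3 dims, each example in the batch has its own set of candidate vectors.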
if cands.dim() == 2:
return torch.matmul(output, cands.t())
elif cands.dim() == 3:
return torch.bmm(output.unsqueeze(1), cands.transpose(1, 2)).squeeze(1)
else:
raise RuntimeError(
'Unexpected candidate dimensions {}' ''.format(cands.dim())
)
def build_model(self, states=None):
"""
Build and return model.
"""
model = MixerModel(self.opt, self.dict)
if self.opt['embedding_type'] != 'random':
self._copy_embeddings(model.embeddings.weight, self.opt['embedding_type'])
return model
def batchify(self, obs_batch, sort=False):
"""
Override so that we can add memories to the Batch object.
"""
batch = super().batchify(obs_batch, sort)
if self.opt['use_memories']:
valid_obs = [(i, ex) for i, ex in enumerate(obs_batch) if self.is_valid(ex)]
valid_inds, exs = zip(*valid_obs)
mems = None
if any('memory_vecs' in ex for ex in exs):
mems = [ex.get('memory_vecs', None) for ex in exs]
batch.memory_vecs = mems
return batch
def _vectorize_memories(self, obs):
# TODO: move this to Torch Ranker Agent
raise NotImplementedError(
'Abstract class: user must implement this function to use memories'
)
def vectorize(self, *args, **kwargs):
"""
Override to include vectorization of memories.
"""
kwargs['add_start'] = False
kwargs['add_end'] = False
obs = super().vectorize(*args, **kwargs)
if self.opt['use_memories']:
obs = self._vectorize_memories(obs)
return obs
def encode_candidates(self, padded_cands):
"""
Encode candidates.
"""
_, cands = self.model(xs=None, mems=None, cands=padded_cands)
return cands
def score_candidates(self, batch, cand_vecs, cand_encs=None):
"""
Score candidates.
"""
# convoluted check that not all memories are empty
if (
self.opt['use_memories']
and batch.memory_vecs is not None
and sum(len(m) for m in batch.memory_vecs)
):
mems = padded_3d(batch.memory_vecs, pad_idx=self.NULL_IDX)
else:
mems = None
if cand_encs is not None:
# we pre-encoded the candidates, do not re-encode here
cand_vecs = None
context_h, cands_h = self.model(xs=batch.text_vec, mems=mems, cands=cand_vecs)
if cand_encs is not None:
cands_h = cand_encs
scores = self._score(context_h, cands_h)
return scores
class TransformerGeneratorAgent(TorchGeneratorAgent):
"""
TransformerGeneratorAgent.
Implementation of TorchGeneratorAgent, where the model is a Transformer
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Add command-line arguments specifically for this agent.
"""
agent = parser.add_argument_group('Transformer Arguments')
add_common_cmdline_args(agent)
cls.dictionary_class().add_cmdline_args(parser, partial_opt=partial_opt)
super().add_cmdline_args(parser, partial_opt=partial_opt)
return agent
def build_model(self, states=None):
"""
Build and return model.
"""
model = MixerGeneratorModel(self.opt, self.dict)
if self.opt['embedding_type'] != 'random':
self._copy_embeddings(
model.encoder.embeddings.weight, self.opt['embedding_type']
)
return model
def _resize_token_embeddings(self, state_dict, msg=None):
"""
        Resize the token embeddings when adding extra special tokens.
"""
# map extra special tokens carefully
new_size = self.model.embeddings.weight.size()[0]
orig_size = state_dict['embeddings.weight'].size()[0]
logging.info(f'Resizing token embeddings from {orig_size} to {new_size}')
if new_size <= orig_size:
# new size should be greater than original size,
# as we are adding special tokens
raise RuntimeError(msg)
for emb_weights in [
'embeddings.weight',
'encoder.embeddings.weight',
'decoder.embeddings.weight',
]:
# get new_embs
old_embs = state_dict[emb_weights]
new_embs = recursive_getattr(self.model, emb_weights).to(old_embs.device)
# copy over old weights
new_embs.data[:orig_size, :] = old_embs.data[:orig_size, :]
# reset in state dict
state_dict[emb_weights] = new_embs
return state_dict
class TransformerClassifierAgent(TorchClassifierAgent):
"""
Classifier based on Transformer.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
TransformerRankerAgent.add_cmdline_args(
parser, partial_opt=partial_opt
) # add transformer args
super().add_cmdline_args(parser, partial_opt=partial_opt)
parser.add_argument(
'--load-from-pretrained-ranker',
type='bool',
default=False,
help='load model from base transformer ranking model '
'(used for pretraining)',
)
parser.set_defaults(reduction_type='first')
return parser
def build_model(self):
num_classes = len(self.class_list)
self.base_model = MixerModel(self.opt, self.dict)
return TransformerLinearWrapper(self.base_model.context_encoder, num_classes)
def vectorize(self, *args, **kwargs):
"""
Add the start and end token to the text.
"""
kwargs['add_start'] = True
kwargs['add_end'] = True
obs = super().vectorize(*args, **kwargs)
return obs
def _set_text_vec(self, *args, **kwargs):
"""
Add the start and end token to the text.
"""
obs = super()._set_text_vec(*args, **kwargs)
if 'text_vec' in obs and 'added_start_end' not in obs:
obs.force_set(
'text_vec', self._add_start_end_tokens(obs['text_vec'], True, True)
)
obs['added_start_end'] = True
# check truncation after adding start end tokens
if obs.get('text_vec') is not None:
truncated_vec = self._check_truncate(
obs['text_vec'], self.text_truncate, True
)
obs.force_set('text_vec', torch.LongTensor(truncated_vec))
return obs
def score(self, batch):
return self.model(batch.text_vec)
def load_state_dict(self, state_dict):
"""
Load the state dict into model.
This is easily overridable to facilitate transfer of state dicts.
"""
if self.is_finetune and self.opt['load_from_pretrained_ranker']:
self.base_model.load_state_dict(state_dict, strict=False)
else:
self.model.load_state_dict(state_dict)
| [
"torch.LongTensor"
] | 1.4.0 | dongfangyixi/ParlAI | 424a2b3c7086593f699c76612dffd1d925986177 |
0.4 | import os
import sys
import numpy as np
import pandas as pd
from torch.utils.data import Subset
from torch.utils.data.dataset import Dataset # For custom datasets
from torchvision import transforms
PROJECT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(PROJECT_PATH)
from src.base.torchvision_dataset import TorchvisionDataset
from src.datasets.preprocessing import get_target_label_idx
from src.datasets.data_splitter import DatasetDivider
from src.datasets.data_set_generic import Dataset
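# Note: this import shadows the torch Dataset imported above, so the Hits class below
# subclasses the project's generic Dataset rather than torch.utils.data.Dataset.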
class HitsDataset(TorchvisionDataset):
def __init__(self, root: str, normal_class=1):
super().__init__(root)
self.n_classes = 2 # 0: normal, 1: outlier
self.normal_classes = tuple([normal_class])
self.outlier_classes = list(range(0, 2))
self.outlier_classes.remove(normal_class)
self.data_dict = pd.read_pickle(self.root)
# hardcoded selected channel
images = self.normalize_by_image(self.data_dict['images'])[..., 3][
..., np.newaxis]
labels = np.array(self.data_dict['labels'])
dataset = Dataset(data_array=images, data_label=labels, batch_size=50)
data_splitter = DatasetDivider(test_size=0.3, validation_size=0.1)
data_splitter.set_dataset_obj(dataset)
train_dataset, test_dataset, val_dataset = \
data_splitter.get_train_test_val_set_objs()
transform = transforms.Compose([transforms.ToTensor()])
target_transform = transforms.Lambda(
lambda x: int(x in self.outlier_classes))
train_set = Hits(train_dataset.data_array, train_dataset.data_label,
transform=transform, target_transform=target_transform)
train_idx_normal = get_target_label_idx(
np.array(train_set.label_arr), self.normal_classes)
self.train_set = Subset(train_set, train_idx_normal)
print(self.train_set.__len__())
self.val_all_set = Hits(val_dataset.data_array, val_dataset.data_label,
transform=transform,
target_transform=target_transform)
val_idx_normal = get_target_label_idx(
np.array(self.val_all_set.label_arr), self.normal_classes)
self.val_normal_set = Subset(self.val_all_set, val_idx_normal)
print(self.val_normal_set.__len__())
self.test_set = Hits(test_dataset.data_array, test_dataset.data_label,
transform=transform,
target_transform=target_transform)
def normalize_by_image(self, images):
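        # Shift each image to a per-channel minimum of zero, then scale by its per-channel maximum (NaNs ignored).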
images -= np.nanmin(images, axis=(1, 2))[:, np.newaxis, np.newaxis, :]
images = images / np.nanmax(images, axis=(1, 2))[
:, np.newaxis, np.newaxis, :]
return images
class Hits(Dataset):
def __init__(self, images, labels, transform, target_transform):
"""
"""
# Transforms
self.transform = transform
self.target_transform = target_transform
self.image_arr = images
self.label_arr = labels
print(self.image_arr.shape)
self.data_len = self.label_arr.shape[0]
def __getitem__(self, index):
single_image = self.image_arr[index]
single_image_label = self.label_arr[index]
if self.transform is not None:
img = self.transform(single_image)
if self.target_transform is not None:
target = self.target_transform(single_image_label)
return img, target, index # only line changed
def __len__(self):
return self.data_len
| [
"torch.utils.data.Subset"
] | 0.4.1 | ReyesDeJong/Deep-SVDD-PyTorch | 1fc7eae1474556f869d5c5422da74fd4fe2f1aed |
1.0 | # -*- coding: utf-8 -*-
"""
Romanization of Thai words based on machine-learnt engine ("thai2rom")
"""
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pythainlp.corpus import download, get_corpus_path
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ThaiTransliterator:
def __init__(self):
"""
Transliteration of Thai words
Now supports Thai to Latin (romanization)
"""
# Download the model, if it's not on your machine.
self.__filemodel = get_corpus_path("thai2rom-pytorch-attn")
if not self.__filemodel:
download("thai2rom-pytorch-attn")
self.__filemodel = get_corpus_path("thai2rom-pytorch-attn")
loader = torch.load(self.__filemodel, map_location=device)
INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT = loader["encoder_params"]
OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT = loader["decoder_params"]
self._maxlength = 100
self._char_to_ix = loader["char_to_ix"]
self._ix_to_char = loader["ix_to_char"]
self._target_char_to_ix = loader["target_char_to_ix"]
self._ix_to_target_char = loader["ix_to_target_char"]
# encoder/ decoder
# Restore the model and construct the encoder and decoder.
self._encoder = Encoder(
INPUT_DIM, E_EMB_DIM, E_HID_DIM, E_DROPOUT)
self._decoder = AttentionDecoder(
OUTPUT_DIM, D_EMB_DIM, D_HID_DIM, D_DROPOUT
)
self._network = Seq2Seq(
self._encoder,
self._decoder,
self._target_char_to_ix["<start>"],
self._target_char_to_ix["<end>"],
self._maxlength,
).to(device)
self._network.load_state_dict(loader["model_state_dict"])
self._network.eval()
def _prepare_sequence_in(self, text: str):
"""
Prepare input sequence for PyTorch
"""
idxs = []
for ch in text:
if ch in self._char_to_ix:
idxs.append(self._char_to_ix[ch])
else:
idxs.append(self._char_to_ix["<UNK>"])
idxs.append(self._char_to_ix["<end>"])
tensor = torch.tensor(idxs, dtype=torch.long)
return tensor.to(device)
def romanize(self, text: str) -> str:
"""
:param str text: Thai text to be romanized
:return: English (more or less) text that spells out how the Thai text
should be pronounced.
"""
input_tensor = self._prepare_sequence_in(text).view(1, -1)
input_length = [len(text) + 1]
target_tensor_logits = self._network(input_tensor,
input_length,
None, 0)
# Seq2seq model returns <END> as the first token,
# As a result, target_tensor_logits.size() is torch.Size([0])
if target_tensor_logits.size(0) == 0:
target = ["<PAD>"]
else:
target_tensor = (
torch.argmax(
target_tensor_logits.squeeze(1),
1).cpu().numpy()
)
target = [self._ix_to_target_char[t] for t in target_tensor]
return "".join(target)
class Encoder(nn.Module):
def __init__(self, vocabulary_size, embedding_size,
hidden_size, dropout=0.5):
"""Constructor"""
super(Encoder, self).__init__()
self.hidden_size = hidden_size
self.character_embedding = nn.Embedding(vocabulary_size,
embedding_size)
self.rnn = nn.LSTM(
input_size=embedding_size,
hidden_size=hidden_size // 2,
bidirectional=True,
batch_first=True,
)
self.dropout = nn.Dropout(dropout)
def forward(self, sequences, sequences_lengths):
# sequences: (batch_size, sequence_length=MAX_LENGTH)
# sequences_lengths: (batch_size)
batch_size = sequences.size(0)
self.hidden = self.init_hidden(batch_size)
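        # pack_padded_sequence below expects sequences sorted by decreasing length,
        # so sort here and restore the original batch order afterwards via index_unsort.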
sequences_lengths = np.sort(sequences_lengths)[::-1]
index_sorted = np.argsort(
-sequences_lengths
) # use negation in sort in descending order
index_unsort = np.argsort(index_sorted) # to unsorted sequence
index_sorted = torch.from_numpy(index_sorted)
sequences = sequences.index_select(0, index_sorted.to(device))
sequences = self.character_embedding(sequences)
sequences = self.dropout(sequences)
sequences_packed = nn.utils.rnn.pack_padded_sequence(
sequences, sequences_lengths.copy(), batch_first=True
)
sequences_output, self.hidden = self.rnn(sequences_packed,
self.hidden)
sequences_output, _ = nn.utils.rnn.pad_packed_sequence(
sequences_output, batch_first=True
)
index_unsort = torch.from_numpy(index_unsort).to(device)
sequences_output = sequences_output.index_select(
0, index_unsort.clone().detach()
)
return sequences_output, self.hidden
def init_hidden(self, batch_size):
h_0 = torch.zeros(
[2, batch_size, self.hidden_size // 2], requires_grad=True
).to(device)
c_0 = torch.zeros(
[2, batch_size, self.hidden_size // 2], requires_grad=True
).to(device)
return (h_0, c_0)
class Attn(nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
if self.method == "general":
self.attn = nn.Linear(self.hidden_size, hidden_size)
elif self.method == "concat":
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.other = nn.Parameter(torch.FloatTensor(1, hidden_size))
def forward(self, hidden, encoder_outputs, mask):
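        # hidden: (batch_size, 1, hidden_size); encoder_outputs: (batch_size, sequence_len, hidden_size)
        # mask: (batch_size, sequence_len); returns attention weights of shape (batch_size, sequence_len)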
# Calculate energies for each encoder output
if self.method == "dot":
attn_energies = torch.bmm(encoder_outputs,
hidden.transpose(1, 2)).squeeze(2)
elif self.method == "general":
attn_energies = self.attn(
encoder_outputs.view(-1, encoder_outputs.size(-1))
) # (batch_size * sequence_len, hidden_size)
attn_energies = torch.bmm(
attn_energies.view(
*encoder_outputs.size()), hidden.transpose(1, 2)
).squeeze(2) # (batch_size, sequence_len)
elif self.method == "concat":
attn_energies = self.attn(
torch.cat((
hidden.expand(*encoder_outputs.size()),
encoder_outputs
), 2)
) # (batch_size, sequence_len, hidden_size)
attn_energies = torch.bmm(
attn_energies,
self.other.unsqueeze(0).expand(*hidden.size()).transpose(1, 2),
).squeeze(2)
attn_energies = attn_energies.masked_fill(mask == 0, -1e10)
# Normalize energies to weights in range 0 to 1
return F.softmax(attn_energies, 1)
class AttentionDecoder(nn.Module):
def __init__(self, vocabulary_size, embedding_size,
hidden_size, dropout=0.5):
"""Constructor"""
super(AttentionDecoder, self).__init__()
self.vocabulary_size = vocabulary_size
self.hidden_size = hidden_size
self.character_embedding = nn.Embedding(vocabulary_size,
embedding_size)
self.rnn = nn.LSTM(
input_size=embedding_size + self.hidden_size,
hidden_size=hidden_size,
bidirectional=False,
batch_first=True,
)
self.attn = Attn(method="general", hidden_size=self.hidden_size)
self.linear = nn.Linear(hidden_size, vocabulary_size)
self.dropout = nn.Dropout(dropout)
def forward(self, input, last_hidden, encoder_outputs, mask):
""""Defines the forward computation of the decoder"""
# input: (batch_size, 1)
        # last_hidden: (1, batch_size, hidden_dim)
# encoder_outputs: (batch_size, sequence_len, hidden_dim)
# mask: (batch_size, sequence_len)
hidden = last_hidden.permute(1, 0, 2)
attn_weights = self.attn(hidden, encoder_outputs, mask)
context_vector = attn_weights.unsqueeze(1).bmm(encoder_outputs)
context_vector = torch.sum(context_vector, dim=1)
context_vector = context_vector.unsqueeze(1)
embedded = self.character_embedding(input)
embedded = self.dropout(embedded)
rnn_input = torch.cat((context_vector, embedded), -1)
output, hidden = self.rnn(rnn_input)
output = output.view(-1, output.size(2))
x = self.linear(output)
return x, hidden[0], attn_weights
class Seq2Seq(nn.Module):
def __init__(
self, encoder, decoder, target_start_token,
target_end_token, max_length
):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.pad_idx = 0
self.target_start_token = target_start_token
self.target_end_token = target_end_token
self.max_length = max_length
assert encoder.hidden_size == decoder.hidden_size
def create_mask(self, source_seq):
mask = source_seq != self.pad_idx
return mask
def forward(
self, source_seq, source_seq_len, target_seq, teacher_forcing_ratio=0.5
):
# source_seq: (batch_size, MAX_LENGTH)
# source_seq_len: (batch_size, 1)
# target_seq: (batch_size, MAX_LENGTH)
batch_size = source_seq.size(0)
start_token = self.target_start_token
end_token = self.target_end_token
max_len = self.max_length
target_vocab_size = self.decoder.vocabulary_size
outputs = torch.zeros(max_len,
batch_size,
target_vocab_size).to(device)
if target_seq is None:
assert teacher_forcing_ratio == 0, "Must be zero during inference"
inference = True
else:
inference = False
encoder_outputs, encoder_hidden = self.encoder(source_seq,
source_seq_len)
decoder_input = (
torch.tensor([[start_token] * batch_size]).view(batch_size,
1).to(device)
)
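        # Concatenate the final forward and backward hidden states of the bidirectional encoder
        # to form the initial hidden state of the unidirectional decoder.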
encoder_hidden_h_t = torch.cat(
[encoder_hidden[0][0], encoder_hidden[0][1]], dim=1
).unsqueeze(dim=0)
decoder_hidden = encoder_hidden_h_t
max_source_len = encoder_outputs.size(1)
mask = self.create_mask(source_seq[:, 0:max_source_len])
for di in range(max_len):
decoder_output, decoder_hidden, _ = self.decoder(
decoder_input, decoder_hidden, encoder_outputs, mask
)
topv, topi = decoder_output.topk(1)
outputs[di] = decoder_output.to(device)
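            # Teacher forcing: with probability teacher_forcing_ratio feed the ground-truth token
            # as the next decoder input, otherwise feed the model's own prediction (topi).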
teacher_force = random.random() < teacher_forcing_ratio
decoder_input = (
target_seq[:, di].reshape(batch_size, 1)
if teacher_force
else topi.detach()
)
if inference and decoder_input == end_token:
return outputs[:di]
return outputs
_THAI_TO_ROM = ThaiTransliterator()
def romanize(text: str) -> str:
return _THAI_TO_ROM.romanize(text)
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.functional.softmax",
"torch.FloatTensor",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.tensor",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.load",
"torch.nn.Embedding",
"torch.sum"
] | 1.0.0 | Subarna578/pythainlp | 9650a40396719284add17bb09f50e948dea41053 |
1.8 | """This lobe enables the integration of huggingface pretrained wav2vec2/hubert/wavlm models.
Reference: https://arxiv.org/abs/2006.11477
Reference: https://arxiv.org/abs/1904.05862
Reference: https://arxiv.org/abs/2110.13900
Transformer from HuggingFace needs to be installed:
https://huggingface.co/transformers/installation.html
Authors
* Titouan Parcollet 2021
* Boumadane Abdelmoumene 2021
"""
import os
import torch
import logging
import pathlib
import numpy as np
import torch.nn.functional as F
from torch import nn
from huggingface_hub import model_info
from speechbrain.pretrained.fetching import fetch
# We check if transformers is installed.
try:
import transformers
from transformers import Wav2Vec2Model, HubertModel, WavLMModel, Data2VecAudioModel
from transformers import Wav2Vec2Config, HubertConfig, WavLMConfig, Data2VecAudioConfig
from transformers import Wav2Vec2FeatureExtractor
from transformers import Wav2Vec2ForPreTraining
from transformers.models.wav2vec2.modeling_wav2vec2 import (
_compute_mask_indices,
)
except ImportError:
MSG = "Please install transformers from HuggingFace to use wav2vec2 / Hubert\n"
MSG += "E.G. run: pip install transformers"
raise ImportError(MSG)
logger = logging.getLogger(__name__)
HF_models = {
"wav2vec2": Wav2Vec2Model,
"hubert": HubertModel,
"wavlm": WavLMModel,
"data2vec": Data2VecAudioModel
}
HF_config = {
"wav2vec2": Wav2Vec2Config,
"hubert": HubertConfig,
"wavlm": WavLMConfig,
"data2vec": Data2VecAudioConfig
}
class HuggingFaceWav2Vec2(nn.Module):
"""This lobe enables the integration of HuggingFace and SpeechBrain
pretrained wav2vec2.0/Hubert models.
Source paper wav2vec2.0: https://arxiv.org/abs/2006.11477
Source paper Hubert: https://arxiv.org/abs/2106.07447
Transformer from HuggingFace needs to be installed:
https://huggingface.co/transformers/installation.html
The model can be used as a fixed feature extractor or can be finetuned. It
    will automatically download the model from HuggingFace or use a local path.
Arguments
---------
source : str
HuggingFace hub name: e.g "facebook/wav2vec2-large-lv60"
save_path : str
Path (dir) of the downloaded model.
output_norm : bool (default: True)
If True, a layer_norm (affine) will be applied to the output obtained
from the wav2vec model.
freeze : bool (default: True)
If True, the model is frozen. If False, the model will be trained
alongside with the rest of the pipeline.
freeze_feature_extractor : bool (default: False)
        When freeze = False and freeze_feature_extractor is True, the feature_extractor module of the model is frozen. If False,
        the whole wav2vec model will be trained, including the feature_extractor module.
apply_spec_augment : bool (default: False)
If True, the model will apply spec augment on the output of feature extractor
(inside huggingface Wav2VecModel() class).
        If False, the model will not apply spec augment. The default is False, to avoid applying it twice.
Example
-------
>>> inputs = torch.rand([10, 600])
>>> model_hub = "facebook/wav2vec2-base-960h"
>>> save_path = "savedir"
>>> model = HuggingFaceWav2Vec2(model_hub, save_path)
>>> outputs = model(inputs)
"""
def __init__(
self,
source,
save_path,
output_norm=True,
freeze=True,
freeze_feature_extractor=False,
apply_spec_augment=False,
load_pretrained_weights=True,
):
super().__init__()
# Download the extractor from HuggingFace.
# The extractor is only used to retrieve the normalisation information
self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
source, cache_dir=save_path
)
# Select specific self-supervised loader (eg. Wav2Vec2, Hubert)
if "hubert" in source:
config = HF_config.get("hubert")
model = HF_models.get("hubert")
elif "wavlm" in source:
config = HF_config.get("wavlm")
model = HF_models.get("wavlm")
elif "data2vec" in source:
config = HF_config.get("data2vec")
model = HF_models.get("data2vec")
else:
config = HF_config.get("wav2vec2")
model = HF_models.get("wav2vec2")
# Download and load the model
self._from_pretrained(
source, config=config, model=model, save_path=save_path, load_weights=load_pretrained_weights
)
# set apply_spec_augment
self.model.config.apply_spec_augment = apply_spec_augment
# We check if inputs need to be normalized w.r.t pretrained wav2vec2
self.normalize_wav = self.feature_extractor.do_normalize
self.freeze = freeze
self.freeze_feature_extractor = freeze_feature_extractor
self.output_norm = output_norm
if self.freeze:
logger.warning(
"speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 is frozen."
)
self.model.eval()
for param in self.model.parameters():
param.requires_grad = False
else:
self.model.train()
if self.freeze_feature_extractor:
self.model.feature_extractor._freeze_parameters()
def _from_pretrained(self, source, config, model, save_path, load_weights):
"""This function manages the source checking and loading of the params.
# 1. Is the model from HF or a local path
# 2. Is the model pretrained with HF or SpeechBrain
# 3. Download (if appropriate) and load with respect to 1. and 2.
"""
is_sb, ckpt_file = self._check_model_source(source)
if not load_weights:
config = config.from_pretrained(source, cache_dir=save_path)
self.model = model(config)
elif is_sb:
config = config.from_pretrained(source, cache_dir=save_path)
self.model = model(config)
self.model.gradient_checkpointing_disable() # Required by DDP
# fetch the checkpoint file
ckpt_full_path = fetch(
filename=ckpt_file, source=source, savedir=save_path
)
# We transfer the parameters from the checkpoint.
self._load_sb_pretrained_w2v2_parameters(ckpt_full_path)
else:
if load_weights:
self.model = model.from_pretrained(source, cache_dir=save_path)
else:
                self.model = model()
def _load_sb_pretrained_w2v2_parameters(self, path):
"""Loads the parameter of a w2v2 model pretrained with SpeechBrain and the
HuggingFaceWav2Vec2Pretrain Object. It is necessary to perform a custom
loading because HuggingFace adds a level to the checkpoint when storing
the model breaking the compatibility between HuggingFaceWav2Vec2Pretrain
and HuggingFaceWav2Vec2.
In practice a typical HuggingFaceWav2Vec2 checkpoint for a given parameter
would be: model.conv.weight.data while for HuggingFaceWav2Vec2Pretrain it
is: model.wav2vec2.weight.data (wav2vec2 must be removed before loading).
"""
modified_state_dict = {}
orig_state_dict = torch.load(path, map_location="cpu")
# We remove the .wav2vec2 in the state dict.
for key, params in orig_state_dict.items():
if "wav2vec2." in key:
save_key = key.replace("model.wav2vec2.", "")
modified_state_dict[save_key] = params
incompatible_keys = self.model.load_state_dict(
modified_state_dict, strict=False
)
for missing_key in incompatible_keys.missing_keys:
logger.warning(
f"During parameter transfer to {self.model} loading from "
+ f"{path}, the transferred parameters did not have "
+ f"parameters for the key: {missing_key}"
)
for unexpected_key in incompatible_keys.unexpected_keys:
logger.warning(
f"The param with the key: {unexpected_key} is discarded as it "
+ "is useless for wav2vec 2.0 finetuning."
)
def _check_model_source(self, path):
"""Checks if the pretrained model has been trained with SpeechBrain and
is hosted locally or on a HuggingFace hub.
"""
checkpoint_filename = ""
source = pathlib.Path(path)
is_local = True
is_sb = True
# If path is a huggingface hub.
if not source.exists():
is_local = False
if is_local:
# Test for HuggingFace model
if any(File.endswith(".bin") for File in os.listdir(path)):
is_sb = False
return is_sb, checkpoint_filename
# Test for SpeechBrain model and get the filename.
for File in os.listdir(path):
if File.endswith(".ckpt"):
checkpoint_filename = os.path.join(path, File)
is_sb = True
return is_sb, checkpoint_filename
else:
files = model_info(
path
).siblings # get the list of files of the Hub
# Test if it's an HuggingFace model or a SB one
for File in files:
if File.rfilename.endswith(".ckpt"):
checkpoint_filename = File.rfilename
is_sb = True
return is_sb, checkpoint_filename
for File in files:
if File.rfilename.endswith(".bin"):
checkpoint_filename = File.rfilename
is_sb = False
return is_sb, checkpoint_filename
err_msg = f"{path} does not contain a .bin or .ckpt checkpoint !"
raise FileNotFoundError(err_msg)
def forward(self, wav):
"""Takes an input waveform and return its corresponding wav2vec encoding.
Arguments
---------
wav : torch.Tensor (signal)
A batch of audio signals to transform to features.
"""
# If we freeze, we simply remove all grads and features from the graph.
if self.freeze:
with torch.no_grad():
return self.extract_features(wav).detach()
return self.extract_features(wav)
def extract_features(self, wav):
"""Takes an input waveform and return its corresponding wav2vec encoding.
Arguments
---------
wav : torch.Tensor (signal)
A batch of audio signals to transform to features.
"""
if self.normalize_wav:
wav = F.layer_norm(wav, wav.shape)
# Extract wav2vec output
out = self.model(wav)[0]
# We normalize the output if required
if self.output_norm:
out = F.layer_norm(out, out.shape)
return out
class HuggingFaceWav2Vec2Pretrain(nn.Module):
"""This lobe enables the integration of HuggingFace
wav2vec2.0 models to be pretrained.
Source paper: https://arxiv.org/abs/2006.11477
Transformer from HuggingFace needs to be installed:
https://huggingface.co/transformers/installation.html
    The forward pass returns a HuggingFace output object together with the mask indices:
    https://huggingface.co/transformers/model_doc/wav2vec2.html#wav2vec2forpretraining
    For instance, it returns the loss, which can be accessed with .loss
Arguments
---------
source : str
        HuggingFace hub name, e.g. "facebook/wav2vec2-large-lv60"
save_path : str
Path (dir) of the downloaded model.
mask_prob : float (default: 0.65)
Probability of masking a given frame. Default is taken from the paper.
mask_length : float (default: 10)
Length (i.e. number of consecutive masked frames). Default is taken from
the paper.
Example
-------
>>> inputs = torch.rand([10, 32000])
>>> model_hub = "facebook/wav2vec2-base-960h"
>>> save_path = "savedir"
>>> model = HuggingFaceWav2Vec2Pretrain(model_hub, save_path)
>>> outputs, _ = model(inputs)
"""
def __init__(
self,
source,
save_path,
mask_prob=0.65,
mask_length=10,
normalize_wav=True,
):
super().__init__()
self.mask_prob = mask_prob
self.mask_length = mask_length
self.normalize_wav = normalize_wav
# Download the config of the model from HuggingFace.
self.config = Wav2Vec2Config.from_pretrained(
source, cache_dir=save_path
)
self.config.output_hidden_states = (
True # We want the hidden states as well!
)
self.model = Wav2Vec2ForPreTraining(self.config)
self.model.gradient_checkpointing_disable() # Required by DDP
self.model.train()
# We check if inputs need to be normalized w.r.t pretrained wav2vec2
def forward(self, wav):
"""Takes an input waveform and return its corresponding wav2vec encoding.
Arguments
---------
wav : torch.Tensor (signal)
A batch of audio signals to transform to features.
"""
batch_size, raw_sequence_length = wav.shape
if self.normalize_wav:
wav = F.layer_norm(wav, wav.shape)
sequence_length = self.model._get_feat_extract_output_lengths(
raw_sequence_length
)
# 1. Compute the indices that will be masked
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.mask_prob,
mask_length=self.mask_length,
)
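        # mask_time_indices is a (batch_size, sequence_length) array marking which
        # frames are masked and must be predicted by the contrastive objective.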
torch_mask_time_indices = torch.tensor(
mask_time_indices, device=wav.device, dtype=torch.long,
)
# 2. Sample the negative samples from the entire sequence.
        # Fairseq does it only on the masked indices, but this only works if you
        # have long sentences. For more versatility, we sample on the entire
        # sequence of values.
full_sentence_indices = np.ones((batch_size, sequence_length))
# print(np.sum(mask_time_indices, axis=1))
negative_sample_indices = torch.tensor(
transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices(
(batch_size, sequence_length),
num_negatives=self.config.num_negatives,
mask_time_indices=full_sentence_indices,
),
device=wav.device,
dtype=torch.long,
)
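        # For every time step, _sample_negative_indices draws num_negatives distractor
        # positions; passing full_sentence_indices makes the candidates come from the
        # whole sequence instead of only the masked steps, as noted above.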
return (
self.model(
wav,
mask_time_indices=torch_mask_time_indices,
sampled_negative_indices=negative_sample_indices,
),
torch_mask_time_indices,
)
| [
"torch.nn.functional.layer_norm",
"torch.no_grad",
"torch.tensor",
"torch.load"
] | 1.8.0 | RaphaelOlivier/speechbrain | 142dc6caa4b46ca4c9341b0cd39627f489808749 |
0.4 | from urllib.request import urlopen
import torch
from torch import nn
import numpy as np
from skimage.morphology import label
import os
from HD_BET.paths import folder_with_parameter_files
def get_params_fname(fold):
return os.path.join(folder_with_parameter_files, "%d.model" % fold)
def maybe_download_parameters(fold=0, force_overwrite=False):
"""
Downloads the parameters for some fold if it is not present yet.
:param fold:
:param force_overwrite: if True the old parameter file will be deleted (if present) prior to download
:return:
"""
assert 0 <= fold <= 4, "fold must be between 0 and 4"
if not os.path.isdir(folder_with_parameter_files):
maybe_mkdir_p(folder_with_parameter_files)
out_filename = get_params_fname(fold)
if force_overwrite and os.path.isfile(out_filename):
os.remove(out_filename)
if not os.path.isfile(out_filename):
url = "https://zenodo.org/record/2540695/files/%d.model?download=1" % fold
print("Downloading", url, "...")
data = urlopen(url).read()
with open(out_filename, 'wb') as f:
f.write(data)
def init_weights(module):
if isinstance(module, nn.Conv3d):
module.weight = nn.init.kaiming_normal(module.weight, a=1e-2)
if module.bias is not None:
module.bias = nn.init.constant(module.bias, 0)
def softmax_helper(x):
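    # Numerically stable softmax over dim 1 (the channel/class dimension):
    # subtracting the per-sample maximum before exponentiating avoids overflow.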
rpt = [1 for _ in range(len(x.size()))]
rpt[1] = x.size(1)
x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
e_x = torch.exp(x - x_max)
return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
class SetNetworkToVal(object):
def __init__(self, use_dropout_sampling=False, norm_use_average=True):
self.norm_use_average = norm_use_average
self.use_dropout_sampling = use_dropout_sampling
def __call__(self, module):
if isinstance(module, nn.Dropout3d) or isinstance(module, nn.Dropout2d) or isinstance(module, nn.Dropout):
module.train(self.use_dropout_sampling)
elif isinstance(module, nn.InstanceNorm3d) or isinstance(module, nn.InstanceNorm2d) or \
isinstance(module, nn.InstanceNorm1d) \
or isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.BatchNorm3d) or \
isinstance(module, nn.BatchNorm1d):
module.train(not self.norm_use_average)
def postprocess_prediction(seg):
# basically look for connected components and choose the largest one, delete everything else
print("running postprocessing... ")
mask = seg != 0
lbls = label(mask, connectivity=mask.ndim)
lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]
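    # lbls_sizes[0] is the background component (label 0), so the argmax is taken
    # over lbls_sizes[1:] and shifted by +1 to recover the matching label id.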
largest_region = np.argmax(lbls_sizes[1:]) + 1
seg[lbls != largest_region] = 0
return seg
def subdirs(folder, join=True, prefix=None, suffix=None, sort=True):
if join:
l = os.path.join
else:
l = lambda x, y: y
res = [l(folder, i) for i in os.listdir(folder) if os.path.isdir(os.path.join(folder, i))
and (prefix is None or i.startswith(prefix))
and (suffix is None or i.endswith(suffix))]
if sort:
res.sort()
return res
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
if join:
l = os.path.join
else:
l = lambda x, y: y
res = [l(folder, i) for i in os.listdir(folder) if os.path.isfile(os.path.join(folder, i))
and (prefix is None or i.startswith(prefix))
and (suffix is None or i.endswith(suffix))]
if sort:
res.sort()
return res
subfolders = subdirs # I am tired of confusing those
def maybe_mkdir_p(directory):
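    # Creates every missing level of an absolute, "/"-separated path (e.g. "/a/b/c");
    # relative or Windows-style paths are not handled by this helper.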
splits = directory.split("/")[1:]
for i in range(0, len(splits)):
if not os.path.isdir(os.path.join("/", *splits[:i+1])):
os.mkdir(os.path.join("/", *splits[:i+1]))
| [
"torch.nn.init.constant",
"torch.nn.init.kaiming_normal",
"torch.exp"
] | 0.4.1 | evertdeman/HD-BET | 817a50d2fe9b8663646cc74652cb50e26f343a3b |
1.9 | import os
from collections import defaultdict
import numpy as np
import torch
from termcolor import colored
from torch.utils.tensorboard import SummaryWriter
from common import utils
class Manager():
def __init__(self, model, optimizer, scheduler, params, dataloaders, logger):
# params status
self.params = params
self.model = model
self.optimizer = optimizer
self.scheduler = scheduler
self.dataloaders = dataloaders
self.logger = logger
self.epoch = 0
self.step = 0
self.best_val_score = np.inf
self.cur_val_score = np.inf
self.best_test_score = np.inf
self.cur_test_score = np.inf
# train status
self.train_status = defaultdict(utils.AverageMeter)
# val status
self.val_status = defaultdict(utils.AverageMeter)
# test status
self.test_status = defaultdict(utils.AverageMeter)
# model status
self.loss_status = defaultdict(utils.AverageMeter)
# init local tensorboard and html
self.init_tb_and_html()
def init_tb_and_html(self):
# tensorboard loss
local_tb_dir = os.path.join(self.params.model_dir, "summary/loss")
os.makedirs(local_tb_dir, exist_ok=True)
self.local_loss_writter = SummaryWriter(log_dir=local_tb_dir)
# tensorboard metric
local_tb_dir = os.path.join(self.params.model_dir, "summary/metric")
os.makedirs(local_tb_dir, exist_ok=True)
self.local_metric_writter = SummaryWriter(log_dir=local_tb_dir)
# html
local_html_dir = os.path.join(self.params.model_dir, "summary/html")
os.makedirs(local_html_dir, exist_ok=True)
self.local_html_dir = local_html_dir
def update_step(self):
self.step += 1
def update_epoch(self):
self.epoch += 1
def update_loss_status(self, loss, batch_size):
for k, v in loss.items():
self.loss_status[k].update(val=v.item(), num=batch_size)
def update_metric_status(self, metrics, split, batch_size):
if split == "val":
for k, v in metrics.items():
self.val_status[k].update(val=v.item(), num=batch_size)
self.cur_val_score = self.val_status[self.params.major_metric].avg
elif split == "test":
for k, v in metrics.items():
self.test_status[k].update(val=v.item(), num=batch_size)
self.cur_test_score = self.test_status[self.params.major_metric].avg
else:
raise ValueError("Wrong eval type: {}".format(split))
def summarize_metric_status(self, metrics, split):
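        # Derives RMSE entries from any accumulated *MSE meters at summary time,
        # e.g. a hypothetical "transMSE" average would yield a "transRMSE" entry.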
if split == "val":
for k in metrics:
if k.endswith('MSE'):
self.val_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.val_status[k].avg))
else:
continue
elif split == "test":
for k in metrics:
if k.endswith('MSE'):
self.test_status[k[:-3] + 'RMSE'].set(val=np.sqrt(self.test_status[k].avg))
else:
continue
else:
raise ValueError("Wrong eval type: {}".format(split))
def reset_loss_status(self):
for k, v in self.loss_status.items():
self.loss_status[k].reset()
def reset_metric_status(self, split):
if split == "val":
for k, v in self.val_status.items():
self.val_status[k].reset()
elif split == "test":
for k, v in self.test_status.items():
self.test_status[k].reset()
else:
raise ValueError("Wrong split string: {}".format(split))
def print_train_info(self):
exp_name = self.params.model_dir.split('/')[-1]
print_str = "{} Epoch: {:4d}, lr={:.4f} ".format(exp_name, self.epoch, self.scheduler.get_last_lr()[0])
print_str += "total loss: %.4f(%.4f)" % (self.loss_status['total'].val, self.loss_status['total'].avg)
return print_str
def print_metrics(self, split, title="Eval", color="red", only_best=False):
if split == "val":
metric_status = self.val_status
is_best = self.cur_val_score < self.best_val_score
elif split == "test":
metric_status = self.test_status
is_best = self.cur_test_score < self.best_test_score
else:
raise ValueError("Wrong split string: {}".format(split))
print_str = " | ".join("{}: {:4g}".format(k, v.avg) for k, v in metric_status.items())
if only_best:
if is_best:
self.logger.info(colored("Best Epoch: {}, {} Results: {}".format(self.epoch, title, print_str), color, attrs=["bold"]))
else:
self.logger.info(colored("Epoch: {}, {} Results: {}".format(self.epoch, title, print_str), color, attrs=["bold"]))
def write_loss_to_tb(self, split):
for k, v in self.loss_status.items():
if split == "train":
self.local_loss_writter.add_scalar("train_Loss/{}".format(k), v.val, self.step)
elif split == "val":
self.local_loss_writter.add_scalar("val_Loss/{}".format(k), v.val, self.step)
elif split == "test":
self.local_loss_writter.add_scalar("test_Loss/{}".format(k), v.val, self.step)
else:
raise ValueError("Wrong split string: {}".format(split))
def write_metric_to_tb(self, split):
if split == "val":
for k, v in self.val_status.items():
self.local_metric_writter.add_scalar("val_Metric/{}".format(k), v.avg, self.epoch)
elif split == "test":
for k, v in self.test_status.items():
self.local_metric_writter.add_scalar("test_Metric/{}".format(k), v.avg, self.epoch)
else:
raise ValueError("Wrong split string: {}".format(split))
def check_best_save_last_checkpoints(self, save_latest_freq=5, save_best_after=50):
state = {
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
"scheduler": self.scheduler.state_dict(),
"step": self.step,
"epoch": self.epoch,
}
if self.dataloaders["val"] is not None:
state["best_val_score"] = self.best_val_score
if self.dataloaders["test"] is not None:
state["best_test_score"] = self.best_test_score
# save latest checkpoint
if self.epoch % save_latest_freq == 0:
latest_ckpt_name = os.path.join(self.params.model_dir, "model_latest.pth")
torch.save(state, latest_ckpt_name)
self.logger.info("Saved latest checkpoint to: {}".format(latest_ckpt_name))
# save val latest metrics, and check if val is best checkpoints
if self.dataloaders["val"] is not None:
val_latest_metrics_name = os.path.join(self.params.model_dir, "val_metrics_latest.json")
utils.save_dict_to_json(self.val_status, val_latest_metrics_name)
is_best = self.cur_val_score < self.best_val_score
if is_best:
# save metrics
self.best_val_score = self.cur_val_score
best_metrics_name = os.path.join(self.params.model_dir, "val_metrics_best.json")
utils.save_dict_to_json(self.val_status, best_metrics_name)
self.logger.info("Current is val best, score={:.7f}".format(self.best_val_score))
# save checkpoint
if self.epoch > save_best_after:
best_ckpt_name = os.path.join(self.params.model_dir, "val_model_best.pth")
torch.save(state, best_ckpt_name)
self.logger.info("Saved val best checkpoint to: {}".format(best_ckpt_name))
# save test latest metrics, and check if test is best checkpoints
if self.dataloaders["test"] is not None:
test_latest_metrics_name = os.path.join(self.params.model_dir, "test_metrics_latest.json")
utils.save_dict_to_json(self.test_status, test_latest_metrics_name)
is_best = self.cur_test_score < self.best_test_score
if is_best:
# save metrics
self.best_test_score = self.cur_test_score
best_metrics_name = os.path.join(self.params.model_dir, "test_metrics_best.json")
utils.save_dict_to_json(self.test_status, best_metrics_name)
self.logger.info("Current is test best, score={:.7f}".format(self.best_test_score))
# save checkpoint
if self.epoch > save_best_after:
best_ckpt_name = os.path.join(self.params.model_dir, "test_model_best.pth")
torch.save(state, best_ckpt_name)
self.logger.info("Saved test best checkpoint to: {}".format(best_ckpt_name))
def load_checkpoints(self):
state = torch.load(self.params.restore_file)
ckpt_component = []
if "state_dict" in state and self.model is not None:
try:
self.model.load_state_dict(state["state_dict"])
except RuntimeError:
print("Using custom loading net")
net_dict = self.model.state_dict()
if "module" not in list(state["state_dict"].keys())[0]:
state_dict = {"module." + k: v for k, v in state["state_dict"].items() if "module." + k in net_dict.keys()}
else:
state_dict = {k: v for k, v in state["state_dict"].items() if k in net_dict.keys()}
net_dict.update(state_dict)
self.model.load_state_dict(net_dict, strict=False)
ckpt_component.append("net")
if not self.params.only_weights:
if "optimizer" in state and self.optimizer is not None:
try:
self.optimizer.load_state_dict(state["optimizer"])
except RuntimeError:
print("Using custom loading optimizer")
optimizer_dict = self.optimizer.state_dict()
state_dict = {k: v for k, v in state["optimizer"].items() if k in optimizer_dict.keys()}
optimizer_dict.update(state_dict)
self.optimizer.load_state_dict(optimizer_dict)
ckpt_component.append("opt")
if "scheduler" in state and self.train_status["scheduler"] is not None:
try:
self.scheduler.load_state_dict(state["scheduler"])
except RuntimeError:
print("Using custom loading scheduler")
scheduler_dict = self.scheduler.state_dict()
state_dict = {k: v for k, v in state["scheduler"].items() if k in scheduler_dict.keys()}
scheduler_dict.update(state_dict)
self.scheduler.load_state_dict(scheduler_dict)
ckpt_component.append("sch")
if "step" in state:
self.step = state["step"] + 1
ckpt_component.append("step")
if "epoch" in state:
self.epoch = state["epoch"] + 1
ckpt_component.append("epoch")
if "best_val_score" in state:
self.best_val_score = state["best_val_score"]
ckpt_component.append("best val score: {:.3g}".format(self.best_val_score))
if "best_test_score" in state:
self.best_test_score = state["best_test_score"]
ckpt_component.append("best test score: {:.3g}".format(self.best_test_score))
ckpt_component = ", ".join(i for i in ckpt_component)
self.logger.info("Loaded models from: {}".format(self.params.restore_file))
self.logger.info("Ckpt load: {}".format(ckpt_component))
| [
"torch.save",
"torch.utils.tensorboard.SummaryWriter",
"torch.load"
] | 1.9.1 | hxwork/OMNet | be88a734e7327def365e1875bbc7cd2fea1539b0 |
1.9 | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Define the Siamese network for one-shot learning,
for French short labels
02/06/2021
@author: milena-git, courtesy of jeremylhour
"""
import torch
import torch.nn as nn
def _createEmbeddingLayer(weights_matrix, non_trainable=False):
"""
_createEmbeddingLayer:
create a layer from pre-trained embeddings
    @param weights_matrix (np.array): pre-trained embedding matrix of shape
        (num_embeddings, embedding_dim)
    @param non_trainable (bool): if True, freeze the embedding weights
"""
weights_matrix = torch.tensor(weights_matrix)
num_embeddings, embedding_dim = weights_matrix.size()
emb_layer = nn.Embedding(num_embeddings, embedding_dim)
emb_layer.load_state_dict({'weight': weights_matrix})
if non_trainable:
emb_layer.weight.requires_grad = False
return emb_layer, num_embeddings, embedding_dim
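# Illustrative usage (weights_matrix would typically be a NumPy embedding matrix
# loaded from disk):
# emb_layer, n_emb, emb_dim = _createEmbeddingLayer(weights_matrix, non_trainable=True)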
class SiamesePreTrainedQuadruplet(nn.Module):
def __init__(self, weights_matrix, length, dim=100):
"""
Initialize the siamese network with pre-trained embeddings
@param weights_matrix (torch.tensor):
        @param length (int): length of the inputs
@param dim (int): dimension of the output embedding space
"""
super(SiamesePreTrainedQuadruplet, self).__init__()
self.dim = dim
self.length = length
self.embedding = nn.Embedding.from_pretrained(weights_matrix, padding_idx=0)
self.fc1 = nn.Sequential(
nn.Linear(self.length * weights_matrix.size()[1], 1000),
nn.ReLU(inplace=True),
nn.Linear(1000, 800),
nn.Dropout(0.2),
nn.Linear(800, 500),
nn.Dropout(0.2),
nn.Linear(500, self.dim)
)
def forward_once(self, x):
"""
        Run one branch of the network on a single input
        @param x (): tokenized input from SiameseNetworkDataset
"""
embedded = self.embedding(x)
embedded = torch.reshape(embedded, (embedded.size()[0], embedded.size()[1] * embedded.size()[2]))
output = self.fc1(embedded)
return output
def forward(self, anchor, positive, negative1, negative2):
"""
        Run the model forward by applying forward_once to each input.
        Main forward used during training; wraps forward_once().
@param anchor, positive, negative1, negative2 (): output from SiameseNetworkDataset
"""
anchor_o, positive_o, negative1_o, negative2_o = self.forward_once(anchor), self.forward_once(
positive), self.forward_once(negative1), self.forward_once(negative2)
return anchor_o, positive_o, negative1_o, negative2_o
if __name__ == '__main__':
pass
| [
"torch.nn.Linear",
"torch.nn.Embedding.from_pretrained",
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.tensor",
"torch.nn.Embedding"
] | 1.9.0 | pengfei99/openfood | 2b65af02ce34bf8193d357ef3661da749d2d9671 |
0.20 | # from code.transformer_vid.utils import convert_weights
# import rotary_embedding_torch
from torch.nn.modules.activation import GELU, ReLU
# from data.OneCombo3.trainer import TrainerConfig
import math
import numpy as np
import itertools
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torchvision.models.video import r3d_18
# from ResNet3D import r3d_18
from scipy.optimize import linear_sum_assignment
# from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding
from einops.layers.torch import Rearrange
logger = logging.getLogger(__name__)
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): # nn.Conv3d,
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.2
resid_pdrop = 0.2
attn_pdrop = 0.2
pos_pdrop = 0.2
temp_pdrop = 0.2
pos_emb = True
temp_emb = True
start_prune = 30
epoch = 0
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k, v in kwargs.items():
setattr(self, k, v)
class neuralGPTConfig:
""" base GPT config, params common to all GPT versions """
n = 0.4
im_drop = 0.2
id_drop = n
embd_pdrop = n
resid_pdrop = n
attn_pdrop = n
pos_pdrop = n
temp_pdrop = n
pos_emb = True
temp_emb = True
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k, v in kwargs.items():
setattr(self, k, v)
class GPT1Config(GPTConfig):
""" GPT-1 like network roughly 125M params """
n_layer = 12
n_head = 12
n_embd = 768
class VideoFeaturesExtractor(nn.Module):
"""
R3D: (3 x T x H x W)
H, W = 112
"""
def __init__(self):
super().__init__()
self.backbone = torch.nn.Sequential(*(list(r3d_18(pretrained=True).children())[:-2]))
convert_weights(self.backbone)
# # freeze backbone
# for k, v in self.backbone.named_parameters():
# v.requires_grad = False
def forward(self, x):
# B = Batch, T, C, Fm, H, W
features = self.backbone(x) # (B, C, T, H, W)
B, C, T, H, W = features.shape
features = features.permute(0, 2, 3, 4, 1)
features = features.view(B, -1, C)
return features
class VideoEncoder(nn.Module):
def __init__(self):
super().__init__()
self.to_patch_embedding = nn.Sequential(
Rearrange('b c t (h p1) (w p2) -> b (t h w) (p1 p2 c)', p1=16, p2=16)
)
def forward(self, x):
return self.to_patch_embedding(x)
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
self.config = config
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
self.register_buffer("mask", self.build_mask(config.block_size))
self.n_head = config.n_head
self.att = None
self.T = config.block_size
# self.rotary_embedding = RotarySpatioTemporalEmbedding(config)
def build_mask(self, block_size):
mask = torch.tril(torch.ones((block_size, block_size)),
).view(1, 1, block_size, block_size)
return mask
def generate_sparse_mask(self, att, p, config):
"""
Generate a sparse mask according to p.
"""
assert p >= 0 and p <= 1, "p should be in [0, 1]"
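        # Every key position is dropped with probability p for all queries at once,
        # i.e. whole columns of the (T x T) attention matrix are masked.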
T = config.block_size
mask = torch.rand((1, T)) < p
mask = mask.repeat(T, 1)
mask[0, 0] = False # don't mask 1st step
        # check if any step is fully masked and unmask it
idx_all_true = (True == torch.all(mask, dim=0)).nonzero()
for step in idx_all_true:
sampler = torch.distributions.Uniform(low=0, high=step.item()+1)
idx_false = sampler.sample((1,1)).long()
mask[step, idx_false] = False
# mask = mask.repeat(T, 1)
mask = mask.view(1, 1, T, T).cuda() if att.is_cuda else mask.view(1, 1, T, T)
att = att.masked_fill(mask, float('-inf'))
return att
def forward(self, x, pad=None, dtx=None):
# B = Batch, T = Sequence, C = n_embed
B, T, C = x.size()
# calculate query, key, values for all head in batch and move head forward to the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# # apply rotary embeddings
# if dtx is not None:
# q, k = self.rotary_embedding(q, k, dtx)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
if self.training:
att = self.generate_sparse_mask(att, 0.25, self.config)
if pad is not None:
for idx, i in enumerate(pad):
att[idx, :, :, self.T - i:] = float('-inf') # only able to see first padding token
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
self.att = att
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y
class PositionalEmbedding(nn.Module):
""" Implement the PE function. """
def __init__(self, n_embd, p_drop, max_len=1500):
super().__init__()
self.dropout = nn.Dropout(p=p_drop)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, n_embd)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, n_embd, 2) *
-(math.log(10000.0) / n_embd))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
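    # Note: forward() returns only the (dropout-regularised) positional encodings for
    # the first x.size(1) steps; the caller adds them to its own embeddings.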
def forward(self, x):
x = Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
# class RotarySpatioTemporalEmbedding(nn.Module):
# """ Rotary temporal embeddings - block_size = id_blk_sz """
# def __init__(self, config):
# super().__init__()
# self.frame_block_size = config.frame_block_size
# self.id_block_size = config.id_block_size
# self.emb = RotaryEmbedding(dim=32)
# def forward(self, q, k, t):
# b = t.shape[0]
# tf = self.frame_block_size
# queries = []
# keys = []
# for B in range(b):
# im_temp_emb = torch.tensor([-0.5] * (tf//2) + [0.5] * (tf//2))
# im_pos_emb = torch.arange(self.frame_block_size)
# im_emb = torch.stack([im_temp_emb, im_pos_emb], dim=0)
# id_temp_emb = self.temp_emb(t[B], cache_key=self.block_size)
# freqs = self.emb(torch.cat(im_emb, id_temp_emb))
# queries.append(apply_rotary_emb(freqs, q[B][None, ...]))
# keys.append(apply_rotary_emb(freqs, k[B][None, ...]))
# q, k = torch.cat(queries), torch.cat(keys)
# return q, k
class TemporalEmbedding(nn.Module):
""" encoding temporal information using fourrier signals """
def __init__(self, n_embd, p_drop, max_len=1500):
super().__init__()
self.dropout = nn.Dropout(p=p_drop)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, n_embd)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, n_embd, 2) *
-(math.log(10000.0) / n_embd))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
class LearntTemporalEmbedding(nn.Module):
"""
Project B x T x 1 time sequence to
B x T x C
"""
def __init__(self, block_sz, n_embd, p_drop=0.2):
super().__init__()
self.temp_emb = nn.Sequential(
nn.Linear(1, n_embd // 2),
nn.GELU(),
nn.Linear(n_embd // 2, n_embd),
nn.Dropout(p_drop)
)
def forward(self, x):
return self.temp_emb(x.unsqueeze(-1))
class Decoder(nn.Module):
def __init__(self, config):
super().__init__()
# decoder_layer = nn.TransformerDecoderLayer(config.n_embd, config.n_head,
# activation='gelu', dropout=0.2, batch_first=True)
# self.decoder = nn.TransformerDecoder(decoder_layer, config.n_layer)
self.decoder = nn.Transformer(d_model=config.n_embd, nhead=config.n_head,
num_encoder_layers=3, num_decoder_layers=config.n_layer,
activation="gelu", dropout=0.4, batch_first=True)
self.register_buffer("tgt_mask", self.generate_square_subsequent_mask(config.id_block_size))
# self.register_buffer("tgt_pad_mask", self.generate_padding_mask(config.ids_block_size))
self.T = config.id_block_size
def generate_square_subsequent_mask(self, sz: int, pad=None):
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def generate_padding_mask(self, sz: int, pad=None):
r"""Build a (B x T) mask that resides on the GPU and can be
manipulated by build_padding_mask according to padded sequence
"""
mask = torch.zeros(1, sz, dtype=torch.bool)
return mask
def generate_sparse_mask(self, sz: int, pad=None):
r""" Build a square mask that employs
teacher forcing according to P
"""
rand_mat = torch.rand(1, sz)
k = round(0.75 * sz)
k_th_quant = torch.topk(rand_mat, k, largest = False)[0][:,-1:]
bool_tensor = rand_mat <= k_th_quant
mask = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0)).repeat(sz, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask.cuda(self.tgt_mask.get_device()) if self.tgt_mask.is_cuda else mask
def build_padding_mask(self, tgt, pad):
# mask = self.tgt_pad_mask.repeat(tgt.shape[0], 1)
mask = torch.zeros(tgt.shape[0], self.T, dtype=torch.bool)
for B, P in enumerate(pad):
mask[B, self.T - P:] = True
return mask # .to(torch.cuda.current_device())
def forward(self, tgt, memory, pad):
# padding_mask = self.build_padding_mask(tgt, pad)
# tgt_mask = self.generate_sparse_mask(self.T) if self.training else self.tgt_mask
return self.decoder(src=memory, tgt=tgt, tgt_mask=self.tgt_mask,
tgt_key_padding_mask=None)
class ProjectNorm(nn.Module):
def __init__(self, feat_size, target_size):
super().__init__()
self.ln = nn.LayerNorm(feat_size)
self.mlp = nn.Sequential(
nn.Linear(feat_size, math.floor(2 * feat_size), bias=False),
nn.GELU(),
nn.Linear(math.floor(2 * feat_size), target_size, bias=False),
)
def forward(self, x):
return self.mlp(self.ln(x))
class TimeProjection(nn.Module):
def __init__(self, seq_size, id_seq_size, feat_size, target_size):
super().__init__()
self.mlp_seq = nn.Sequential(
nn.Linear(seq_size, id_seq_size),
nn.ReLU(),
nn.Dropout(p=0.3),
nn.Linear(id_seq_size, id_seq_size)
)
self.mlp_t = nn.Sequential(
nn.Linear(feat_size, feat_size // 2),
nn.ReLU(),
nn.Dropout(p=0.3),
nn.Linear(feat_size // 2, target_size)
)
def forward(self, x):
x = x.permute(0, 2, 1) # B, T, C -> B, C, T
x = self.mlp_seq(x) # B, C, T / 2
x = x.permute(0, 2, 1) # B, T / 2, C
return self.mlp_t(x) # B, T / 2, 1
class PSTHProjection(nn.Module):
"""Takes Last Output of Block -> (B, C)
Builds PSTH table
"""
def __init__(self, config):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd, bias=False),
nn.Dropout(p=0.2),
nn.GELU(),
nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)
)
def forward(self, x):
return self.mlp(x)
# class PSTHProjection(nn.Module):
# def __init__(self, config):
# super().__init__()
# self.mlp_seq = nn.Sequential(
# nn.Linear(config.id_block_size, config.id_block_size // 2, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.id_block_size // 2, 1, bias=False)
# )
# self.mlp_t = nn.Sequential(
# nn.Linear(config.n_embd, config.n_embd * 4, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)
# )
# def forward(self, x):
# x = x.transpose(-1, -2) # B, T, C -> B, C, T
# x = self.mlp_seq(x) # B, C, 1
# x = x.transpose(-2, -1) # B, 1, Vocab_id
# return self.mlp_t(x)
class TimeRNN(nn.Module):
def __init__(self, feat_size, target_size):
super().__init__()
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(),
nn.Linear(4 * config.n_embd, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x, pad=None, dtx=None):
x = x + self.attn(self.ln1(x), pad)
x = x + self.mlp(self.ln2(x))
return x
class BlockSequential(nn.Sequential):
def forward(self, x, pad=None, dtx=None):
for module in self._modules.values():
x = module(x, pad, dtx)
return x
class DiceLossPSTH(nn.Module):
def __init__(self, size_average=True, smooth=1):
super().__init__()
def cross_entropy(self, input, target):
return torch.mean(-torch.sum(target * torch.log(input), 1))
def forward(self, logits, targets, smooth=1, class_weights=None):
total_logits = F.layer_norm(torch.sum(logits, dim=-2), [logits.size()[-1]])
# probs = F.log_softmax(logits, dim=-1)
probs = F.softmax(total_logits, dim=-1)
# logits = F.gelu(logits)
# probs = logits / (logits.max(dim=-1).values.unsqueeze(-1))
# flatten label and prediction tensors
outputs = probs.contiguous().view(-1)
targets = targets.contiguous().view(-1)
labels = torch.zeros_like(outputs)
labels[targets] = 1 / len(targets)
# intersection = (outputs * labels).sum()
# dice = (2. * intersection + smooth) / (outputs.sum() + labels.sum() + smooth)
return self.cross_entropy(outputs[None, ...], labels[None, ...])
class SetLoss(nn.Module):
def __init__(self):
super().__init__()
def cross_entropy(self, input, target):
return torch.mean(-torch.sum(target * torch.log(input), 1))
def forward(self, logits, targets):
targets = targets.contiguous().view(-1)
loss = 0
for n_step, n_logits in enumerate(logits):
n_logits = F.softmax(n_logits, dim=-1)
n_target = targets[n_step:]
n_target_dist = torch.zeros_like(n_logits)
if len(n_target) != 0:
n_target_dist[n_target] = 1 / len(n_target)
loss += self.cross_entropy(n_logits[None,...], n_target_dist[None, ...])
return loss / len(logits)
class TruncatedLoss(nn.Module):
def __init__(self, q=0.8, k=0.2, trainset_size=50000):
super(TruncatedLoss, self).__init__()
self.q = q
self.k = k
self.weight = torch.nn.Parameter(data=torch.ones(trainset_size, 1), requires_grad=False)
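        # q controls how quickly the loss saturates for low-confidence predictions,
        # k is the truncation threshold, and the per-sample weights allow examples to
        # be pruned, in the spirit of the truncated generalized cross-entropy loss.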
def forward(self, logits, targets, indexes):
p = F.softmax(logits, dim=-1)
Yg = torch.gather(p, 2, targets.unsqueeze(2))
loss = ((1-(Yg**self.q))/self.q)*self.weight[indexes] - ((1-(self.k**self.q))/self.q)*self.weight[indexes]
loss = torch.mean(loss)
return loss
def update_weight(self, logits, targets, indexes):
p = F.softmax(logits, dim=-1)
Yg = torch.gather(p, 2, targets.unsqueeze(2))
Lq = ((1-(Yg**self.q))/self.q)
Lqk = np.repeat(((1-(self.k**self.q))/self.q), targets.size(0))
Lqk = torch.from_numpy(Lqk).type(torch.cuda.FloatTensor)
Lqk = torch.unsqueeze(Lqk, 1)
condition = torch.gt(Lqk, Lq)
self.weight[indexes] = condition.type(torch.cuda.FloatTensor)
# class PSTHLOSS(nn.Module):
# def __init__(self):
# super().__init__()
# def forward(self, logits, targets):
# total_logits = torch.sum(logits, dim=-2) # sum over sequence dimension
# probs = F.softmax(total_logits, dim=-1)
# outptu
class HungarianMatcher(nn.Module):
def __init__(self):
super().__init__()
@torch.no_grad()
def forward(self, logits, targets):
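        # Builds a (T x len(targets)) cost matrix from 1 - softmax probabilities and
        # solves the assignment problem, pairing each prediction step with a target id.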
T, C = logits.size()
probs = F.softmax(logits, dim=-1)
cost_id = (1 - probs[:, targets]).cpu().view(T, -1).unsqueeze(0)
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_id.split(len(targets), -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class KLDivLoss(nn.Module):
def __init__(self):
super().__init__()
self.log_softmax = nn.LogSoftmax(dim=-1)
self.KLdiv = nn.KLDivLoss()
def forward(self, logits, targets):
log_probs = self.log_softmax(logits)
return self.KLdiv(log_probs.long(), targets)
class PoissonCrossEntropyLoss(nn.Module):
def __init__(self):
super().__init__()
self.log_softmax = nn.LogSoftmax(dim=-1)
# self.softmax = nn.Softmax(dim=-1)
self.nll_poisson = nn.PoissonNLLLoss()
# self.nll_poisson = nn.NLLLoss()
def forward(self, logits, targets):
log_probs = self.log_softmax(logits)
return self.nll_poisson(log_probs, targets)
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, config):
super().__init__()
self.device = 'cpu'
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.config = config
# input embedding stem
self.n_embd = config.n_embd
self.tok_emb = nn.Embedding(config.id_vocab_size, config.n_embd)
self.pos_emb = PositionalEmbedding(config.n_embd, p_drop=0.2)
# self.pos_emb_id = nn.Parameter(torch.zeros(1, config.id_block_size, config.n_embd))
self.pos_emb_frames = nn.Parameter(torch.zeros(1, config.frame_block_size, config.n_embd))
# self.temp_emb = TemporalEmbedding(config.n_embd, p_drop=0.2)
# self.temp_emb = RotaryTemporalEmbedding(config.id_block_size)
self.temp_emb = LearntTemporalEmbedding(config.id_block_size, config.n_embd)
self.frame_temp_emb = LearntTemporalEmbedding(config.frame_block_size, config.n_embd)
self.id_drop = nn.Dropout(config.id_drop)
self.im_drop = nn.Dropout(config.im_drop)
self.drop = nn.Dropout(config.embd_pdrop)
# -- Visual Backbone -- #
# self.visual_backbone = VideoFeaturesExtractor()
self.video_encoder = VideoEncoder()
frame_temp_emb = torch.tensor(list(itertools.chain(*[[n * 0.05] * (config.frame_block_size//20) for n in range(20)]))).unsqueeze(0)
self.register_buffer("frame_temp_emb_seq", frame_temp_emb)
# -- Contrastive Loss -- ##
# self.proj_id = ProjectNorm(config.n_embd, config.n_embd)
# self.proj_vid = VidProjectNorm(config.n_embd, config.n_embd) # im_shape
## -- IM_Decoder -- ##
# self.blocks_id = BlockSequential(*[Block(config) for _ in range(2)])
# self.blocks_im = BlockSequential(*[Block(config) for _ in range(2)])
# self.ln_f_id = nn.LayerNorm(config.n_embd)
# self.ln_f_im = nn.LayerNorm(config.n_embd)
## -- Decoder -- ##
# self.ln_f = nn.LayerNorm(config.n_embd)
## GPT
# self.blocks = BlockSequential(*[Block(config) for _ in range(config.n_layer)])
# self.ln_f = nn.LayerNorm(config.n_embd)
## enc_dec
self.state_decoder = Decoder(config)
self.ln_f_state_dec = nn.LayerNorm(config.n_embd)
self.stimulus_decoder = Decoder(config)
self.ln_f_stimulus_dec = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
## -- Time -- ##
# self.proj_time = TimeProjection(config.block_size, config.id_block_size, config.n_embd, config.n_dt)
# self.proj_time = ProjectNorm(config.n_embd, config.n_dt)
# self.proj_time = ProjectNorm(config.n_embd, 1)
## -- PSTH -- ##
# self.proj_psth = PSTHProjection(config)
# Loss
# self.dice_loss = DiceLossPSTH()
# self.poisson_loss = PoissonCrossEntropyLoss()
# self.hungarian_matcher = HungarianMatcher()
# self.kldiv_loss = KLDivLoss()
# self.truncated_loss = TruncatedLoss(trainset_size=config.data_size)
# self.set_loss = SetLoss()
# self.a = torch.tensor(0.5, requires_grad=True)
self.block_size = config.block_size
self.apply(self._init_weights)
if config.class_weights is not None:
self.register_buffer("class_weights", config.class_weights)
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def configure_optimizers(self, train_config):
"""
Separates parameters into those who will experience weight decay and those that will not
"""
if train_config.decay_weights:
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
else: no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
black_list_mods = ['pos_emb', 'temp_emb']
for mods in black_list_mods:
for name, param in self.named_parameters():
if mods in name:
no_decay.add(name) # also pos_emb
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.named_parameters()}
no_decay -= decay & no_decay
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
else:
parameters = self.parameters()
optimizer = torch.optim.Adam(parameters, lr=train_config.learning_rate)
return optimizer
def process_features(self, x):
# batch, block_size, feature
p_idx = x['id_prev']
idx = x['id']
dtx = x['dt']
dtx_prev = x['dt_prev']
frames = self.video_encoder(x['frames'])
pad = x['pad']
b, t = idx.size()
# b_p, t_p = p_idx.size()
bf, tf = frames.size()[0:2]
# forward the GPT model
'''
positional and temporal embeddings implemented in multiple ways, learnt,
        Fourier decomposition, and in the case of time, just passed as is.
'''
# # Embeddings
prev_id_position_embeddings = 0 # self.pos_emb(p_idx)
prev_id_temporal_embeddings = self.temp_emb(dtx_prev.float())
id_position_embeddings = 0 # self.pos_emb(idx)
im_position_embeddings = self.pos_emb_frames
temporal_embeddings = self.temp_emb(dtx.float())
# Extract ID features
prev_token_embeddings = self.id_drop(self.tok_emb(p_idx) + prev_id_temporal_embeddings + prev_id_position_embeddings)
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
token_embeddings = token_embeddings + temporal_embeddings + id_position_embeddings
token_embeddings = self.id_drop(token_embeddings)
# Extract image features and add time embeddings
im_temporal_embeddings = self.frame_temp_emb(self.frame_temp_emb_seq)
im_embeddings = frames # self.tok_emb(frames)
im_embeddings = im_embeddings + im_position_embeddings + im_temporal_embeddings
im_embeddings = self.im_drop(im_embeddings) # separate pos emb?
# Tidy up
features = dict()
features['id_prev'] = prev_token_embeddings
features['id'] = token_embeddings
features['frames'] = im_embeddings
return features, pad
def perceiver(self, features, pad):
x = self.state_decoder(tgt=features['id'], memory=features['id_prev'], pad=pad)
x = self.ln_f_state_dec(x)
x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
x = self.ln_f_stimulus_dec(x)
logits = self.head(x)
return logits, x
def enc_dec(self, features, pad):
x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
x = self.ln_f_stimulus_dec(x)
logits = self.head(x)
return logits, x
def GPTdecoder(self, features, pad, dtx=None):
# image + neural features
x = torch.cat((features['frames'], features['id']), dim=1)
# Decoder
x = self.blocks(x, pad, dtx) # (B, T, C)
x = self.ln_f(x)
logits = self.head(x)
# print(logits.shape) # (B, T, Vocab)
# logits_psth = x[:, -1] # (B, C)
return logits, x
def forward(self, x, targets=None):
idx = x['id']
dtx = x['dt']
frames = x['frames']
pad = x['pad']
b, t = idx.size()
# b, t = x['id'].shape[0], x['id'].shape[1] + x['id_prev'].shape[1]
bf, tf = frames.size()[0:2]
tf = self.config.frame_block_size
# assert t + tf == self.config.block_size, f"{tf} {t}"
# assert t <= self.block_size, "Cannot forward, model block size is exhausted"
features, pad = self.process_features(x)
logits, x = self.perceiver(features, pad)
# logits, x = self.enc_dec(features, pad)
# logits, x = self.GPTdecoder(features, pad)
# time = self.proj_time(x) # (B, T_id, 1)
# print(x[:, 0].shape)
# psth = self.proj_psth(x) # (B, Vocab_id)
# if targets, calculate loss
# calculate loss on logits up to padding token for each batch
loss = None
loss_frames = 0
loss_id = []
loss_time = []
loss_dice = []
loss_psth = []
loss_hungarian = []
if targets is not None:
# loss_psth = self.dice_loss(psth, targets['modes'][:, tf:])
for B, P in enumerate(pad):
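                # P is the number of padding tokens at the end of sample B, so only the
                # first t - P id positions of this sample contribute to the loss.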
tf = 0
# im_logits = logits[B, :tf]
# im_targets = targets['frames'][B, :tf]
# loss_frames += F.cross_entropy(im_logits.view(-1, im_logits.size(-1)), im_targets.view(-1))
id_logits = logits[B, tf:tf + t - P]
id_targets = targets['id'][B, :t - P]
loss_id_ = F.cross_entropy(id_logits.view(-1, id_logits.size(-1)), id_targets.view(-1))
# if self.config.epoch >= 15:
# self.truncated_loss.update_weight(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
# loss_id_ = self.truncated_loss(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
# time_preds = time[B, :t - P]
# time_targets = targets['dt'][B, :t - P]
# loss_time_ = F.cross_entropy(time_preds.view(-1, time_preds.size(-1)), time_targets.view(-1))
# loss_time_ = F.mse_loss(time_preds.squeeze(-1), time_targets)
# loss_id_ = self.poisson_loss(id_logits.view(-1, id_logits.size(-1)), F.one_hot(id_targets, self.config.vocab_size))
# if len(id_targets) > 0:
# indices = self.hungarian_matcher(id_logits, id_targets)
# probs_matching, targets_matching = id_logits[indices[0][0]], id_targets[indices[0][1]]
# loss_hungarian_ = F.cross_entropy(probs_matching, targets_matching, weight=self.class_weights).to(self.device)
# loss_hungarian.append(loss_hungarian_)
# # psth = self.proj_psth(x[B, -1]) # from the EOS position
# loss_psth.append(torch.nan_to_num(self.set_loss(id_logits, id_targets)))
# loss_psth_ = self.dice_loss(id_logits, id_targets)
# loss_psth.append(torch.nan_to_num(loss_psth_))
# loss_time.append(torch.nan_to_num(loss_time_))
loss_id.append(torch.nan_to_num(loss_id_))
loss = dict()
# loss['frames'] = loss_frames / (b / 3)
loss['id'] = sum(loss_id) / (b) # sum(loss_id) / (b * 2) # / len(loss_id)
# loss['time'] = sum(loss_time) / (b * 2)
# loss['dice'] = sum(loss_dice) / len(loss_dice)
# loss['dt'] = loss_time / (b * 50)
# loss['hungarian'] = sum(loss_hungarian) / (b * 2)
# loss['psth'] = sum(loss_psth) / (b * 2)
for key in list(loss):
if isinstance(loss[key], float):
del loss[key]
preds = dict()
preds['logits'] = logits # [:, tf:] # only id logits
# preds['dt'] = time
return preds, features, loss | [
"torch.nn.Linear",
"torch.cat",
"torch.optim.AdamW",
"torch.gt",
"torch.cuda.current_device",
"torch.ones",
"torch.cuda.is_available",
"torch.sum",
"torch.nn.PoissonNLLLoss",
"torch.topk",
"torch.nn.LayerNorm",
"torch.nan_to_num",
"torch.unsqueeze",
"torch.tensor",
"torch.nn.KLDivLoss",
"torch.zeros_like",
"torch.as_tensor",
"torch.zeros",
"torch.cos",
"torch.nn.ReLU",
"torch.nn.functional.softmax",
"torch.mean",
"torch.log",
"torch.nn.GELU",
"torch.nn.LogSoftmax",
"torch.rand",
"torch.nn.Dropout",
"torch.sin",
"torch.nn.Transformer",
"torch.arange",
"torch.no_grad",
"torch.optim.Adam",
"torch.from_numpy",
"torch.all",
"torch.nn.Embedding"
] | 0.20.1 | woanderer/neuroformer | df3462d55977b6c9adcb6753e7c474b8b76e8021 |
0.20 | # from code.transformer_vid.utils import convert_weights
# import rotary_embedding_torch
from torch.nn.modules.activation import GELU, ReLU
# from data.OneCombo3.trainer import TrainerConfig
import math
import numpy as np
import itertools
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torchvision.models.video import r3d_18
# from ResNet3D import r3d_18
from scipy.optimize import linear_sum_assignment
# from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding
from einops.layers.torch import Rearrange
logger = logging.getLogger(__name__)
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): # nn.Conv3d,
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.2
resid_pdrop = 0.2
attn_pdrop = 0.2
pos_pdrop = 0.2
temp_pdrop = 0.2
pos_emb = True
temp_emb = True
start_prune = 30
epoch = 0
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k, v in kwargs.items():
setattr(self, k, v)
class neuralGPTConfig:
""" base GPT config, params common to all GPT versions """
n = 0.4
im_drop = 0.2
id_drop = n
embd_pdrop = n
resid_pdrop = n
attn_pdrop = n
pos_pdrop = n
temp_pdrop = n
pos_emb = True
temp_emb = True
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k, v in kwargs.items():
setattr(self, k, v)
class GPT1Config(GPTConfig):
""" GPT-1 like network roughly 125M params """
n_layer = 12
n_head = 12
n_embd = 768
class VideoFeaturesExtractor(nn.Module):
"""
R3D: (3 x T x H x W)
H, W = 112
"""
def __init__(self):
super().__init__()
self.backbone = torch.nn.Sequential(*(list(r3d_18(pretrained=True).children())[:-2]))
convert_weights(self.backbone)
# # freeze backbone
# for k, v in self.backbone.named_parameters():
# v.requires_grad = False
def forward(self, x):
# B = Batch, T, C, Fm, H, W
features = self.backbone(x) # (B, C, T, H, W)
B, C, T, H, W = features.shape
features = features.permute(0, 2, 3, 4, 1)
features = features.view(B, -1, C)
return features
class VideoEncoder(nn.Module):
def __init__(self, n_embd):
super().__init__()
        p1 = p2 = 16
        assert n_embd % (p1 * p2) == 0, "n_embd must be divisible by p1 * p2"
        c = n_embd // (p1 * p2)
        self.to_patch_embedding = nn.Sequential(
            Rearrange('b c t (h p1) (w p2) -> b (t h w) (p1 p2 c)', c=c, p1=p1, p2=p2)
)
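        # The Rearrange above cuts each frame into non-overlapping p1 x p2 patches and
        # flattens them, so every resulting token carries p1 * p2 * c = n_embd values.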
def forward(self, x):
return self.to_patch_embedding(x)
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
self.config = config
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
self.register_buffer("mask", self.build_mask(config.block_size))
self.n_head = config.n_head
self.att = None
self.T = config.block_size
# self.rotary_embedding = RotarySpatioTemporalEmbedding(config)
def build_mask(self, block_size):
mask = torch.tril(torch.ones((block_size, block_size)),
).view(1, 1, block_size, block_size)
return mask
def generate_sparse_mask(self, att, p, config):
"""
Generate a sparse mask according to p.
"""
assert p >= 0 and p <= 1, "p should be in [0, 1]"
T = config.block_size
mask = torch.rand((1, T)) < p
mask = mask.repeat(T, 1)
mask[0, 0] = False # don't mask 1st step
        # check if any step is fully masked and unmask it
idx_all_true = (True == torch.all(mask, dim=0)).nonzero()
for step in idx_all_true:
sampler = torch.distributions.Uniform(low=0, high=step.item()+1)
idx_false = sampler.sample((1,1)).long()
mask[step, idx_false] = False
# mask = mask.repeat(T, 1)
mask = mask.view(1, 1, T, T).cuda() if att.is_cuda else mask.view(1, 1, T, T)
att = att.masked_fill(mask, float('-inf'))
return att
def forward(self, x, pad=None, dtx=None):
# B = Batch, T = Sequence, C = n_embed
B, T, C = x.size()
# calculate query, key, values for all head in batch and move head forward to the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# # apply rotary embeddings
# if dtx is not None:
# q, k = self.rotary_embedding(q, k, dtx)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
if self.training:
att = self.generate_sparse_mask(att, 0.25, self.config)
if pad is not None:
for idx, i in enumerate(pad):
att[idx, :, :, self.T - i:] = float('-inf') # only able to see first padding token
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
self.att = att
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y
class PositionalEmbedding(nn.Module):
""" Implement the PE function. """
def __init__(self, n_embd, p_drop, max_len=1500):
super().__init__()
self.dropout = nn.Dropout(p=p_drop)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, n_embd)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, n_embd, 2) *
-(math.log(10000.0) / n_embd))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
# class RotarySpatioTemporalEmbedding(nn.Module):
# """ Rotary temporal embeddings - block_size = id_blk_sz """
# def __init__(self, config):
# super().__init__()
# self.frame_block_size = config.frame_block_size
# self.id_block_size = config.id_block_size
# self.emb = RotaryEmbedding(dim=32)
# def forward(self, q, k, t):
# b = t.shape[0]
# tf = self.frame_block_size
# queries = []
# keys = []
# for B in range(b):
# im_temp_emb = torch.tensor([-0.5] * (tf//2) + [0.5] * (tf//2))
# im_pos_emb = torch.arange(self.frame_block_size)
# im_emb = torch.stack([im_temp_emb, im_pos_emb], dim=0)
# id_temp_emb = self.temp_emb(t[B], cache_key=self.block_size)
# freqs = self.emb(torch.cat(im_emb, id_temp_emb))
# queries.append(apply_rotary_emb(freqs, q[B][None, ...]))
# keys.append(apply_rotary_emb(freqs, k[B][None, ...]))
# q, k = torch.cat(queries), torch.cat(keys)
# return q, k
class TemporalEmbedding(nn.Module):
""" encoding temporal information using fourrier signals """
def __init__(self, n_embd, p_drop, max_len=1500):
super().__init__()
self.dropout = nn.Dropout(p=p_drop)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, n_embd)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, n_embd, 2) *
-(math.log(10000.0) / n_embd))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
class LearntTemporalEmbedding(nn.Module):
"""
Project B x T x 1 time sequence to
B x T x C
"""
def __init__(self, block_sz, n_embd, p_drop=0.2):
super().__init__()
self.temp_emb = nn.Sequential(
nn.Linear(1, n_embd // 2),
nn.GELU(),
nn.Linear(n_embd // 2, n_embd),
nn.Dropout(p_drop)
)
def forward(self, x):
return self.temp_emb(x.unsqueeze(-1))
class Decoder(nn.Module):
def __init__(self, config):
super().__init__()
# decoder_layer = nn.TransformerDecoderLayer(config.n_embd, config.n_head,
# activation='gelu', dropout=0.2, batch_first=True)
# self.decoder = nn.TransformerDecoder(decoder_layer, config.n_layer)
self.decoder = nn.Transformer(d_model=config.n_embd, nhead=config.n_head,
num_encoder_layers=3, num_decoder_layers=config.n_layer,
activation="gelu", dropout=0.4, batch_first=True)
self.register_buffer("tgt_mask", self.generate_square_subsequent_mask(config.id_block_size))
# self.register_buffer("tgt_pad_mask", self.generate_padding_mask(config.ids_block_size))
self.T = config.id_block_size
def generate_square_subsequent_mask(self, sz: int, pad=None):
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def generate_padding_mask(self, sz: int, pad=None):
r"""Build a (B x T) mask that resides on the GPU and can be
manipulated by build_padding_mask according to padded sequence
"""
mask = torch.zeros(1, sz, dtype=torch.bool)
return mask
def generate_sparse_mask(self, sz: int, pad=None):
r""" Build a square mask that employs
teacher forcing according to P
"""
rand_mat = torch.rand(1, sz)
k = round(0.75 * sz)
k_th_quant = torch.topk(rand_mat, k, largest = False)[0][:,-1:]
bool_tensor = rand_mat <= k_th_quant
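# The k-th smallest random value acts as a per-row threshold: roughly 75% of key positions stay
# visible (filled with 0.0 below), while the remaining ~25% are masked with -inf for every query row.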
mask = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0)).repeat(sz, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask.cuda(self.tgt_mask.get_device()) if self.tgt_mask.is_cuda else mask
def build_padding_mask(self, tgt, pad):
# mask = self.tgt_pad_mask.repeat(tgt.shape[0], 1)
mask = torch.zeros(tgt.shape[0], self.T, dtype=torch.bool)
for B, P in enumerate(pad):
mask[B, self.T - P:] = True
return mask # .to(torch.cuda.current_device())
def forward(self, tgt, memory, pad):
# padding_mask = self.build_padding_mask(tgt, pad)
# tgt_mask = self.generate_sparse_mask(self.T) if self.training else self.tgt_mask
return self.decoder(src=memory, tgt=tgt, tgt_mask=self.tgt_mask,
tgt_key_padding_mask=None)
class ProjectNorm(nn.Module):
def __init__(self, feat_size, target_size):
super().__init__()
self.ln = nn.LayerNorm(feat_size)
self.mlp = nn.Sequential(
nn.Linear(feat_size, math.floor(2 * feat_size), bias=False),
nn.GELU(),
nn.Linear(math.floor(2 * feat_size), target_size, bias=False),
)
def forward(self, x):
return self.mlp(self.ln(x))
class TimeProjection(nn.Module):
def __init__(self, seq_size, id_seq_size, feat_size, target_size):
super().__init__()
self.mlp_seq = nn.Sequential(
nn.Linear(seq_size, id_seq_size),
nn.ReLU(),
nn.Dropout(p=0.3),
nn.Linear(id_seq_size, id_seq_size)
)
self.mlp_t = nn.Sequential(
nn.Linear(feat_size, feat_size // 2),
nn.ReLU(),
nn.Dropout(p=0.3),
nn.Linear(feat_size // 2, target_size)
)
def forward(self, x):
x = x.permute(0, 2, 1) # B, T, C -> B, C, T
x = self.mlp_seq(x) # B, C, T / 2
x = x.permute(0, 2, 1) # B, T / 2, C
return self.mlp_t(x) # B, T / 2, 1
class PSTHProjection(nn.Module):
"""Takes Last Output of Block -> (B, C)
Builds PSTH table
"""
def __init__(self, config):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd, bias=False),
nn.Dropout(p=0.2),
nn.GELU(),
nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)
)
def forward(self, x):
return self.mlp(x)
# class PSTHProjection(nn.Module):
# def __init__(self, config):
# super().__init__()
# self.mlp_seq = nn.Sequential(
# nn.Linear(config.id_block_size, config.id_block_size // 2, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.id_block_size // 2, 1, bias=False)
# )
# self.mlp_t = nn.Sequential(
# nn.Linear(config.n_embd, config.n_embd * 4, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)
# )
# def forward(self, x):
# x = x.transpose(-1, -2) # B, T, C -> B, C, T
# x = self.mlp_seq(x) # B, C, 1
# x = x.transpose(-2, -1) # B, 1, Vocab_id
# return self.mlp_t(x)
class TimeRNN(nn.Module):
def __init__(self, feat_size, target_size):
super().__init__()
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(),
nn.Linear(4 * config.n_embd, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x, pad=None, dtx=None):
x = x + self.attn(self.ln1(x), pad)
x = x + self.mlp(self.ln2(x))
return x
class BlockSequential(nn.Sequential):
def forward(self, x, pad=None, dtx=None):
for module in self._modules.values():
x = module(x, pad, dtx)
return x
class DiceLossPSTH(nn.Module):
def __init__(self, size_average=True, smooth=1):
super().__init__()
def cross_entropy(self, input, target):
return torch.mean(-torch.sum(target * torch.log(input), 1))
def forward(self, logits, targets, smooth=1, class_weights=None):
total_logits = F.layer_norm(torch.sum(logits, dim=-2), [logits.size()[-1]])
# probs = F.log_softmax(logits, dim=-1)
probs = F.softmax(total_logits, dim=-1)
# logits = F.gelu(logits)
# probs = logits / (logits.max(dim=-1).values.unsqueeze(-1))
# flatten label and prediction tensors
outputs = probs.contiguous().view(-1)
targets = targets.contiguous().view(-1)
labels = torch.zeros_like(outputs)
labels[targets] = 1 / len(targets)
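# Build a target distribution with uniform mass 1/len(targets) on the flattened target indices.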
# intersection = (outputs * labels).sum()
# dice = (2. * intersection + smooth) / (outputs.sum() + labels.sum() + smooth)
return self.cross_entropy(outputs[None, ...], labels[None, ...])
class SetLoss(nn.Module):
def __init__(self):
super().__init__()
def cross_entropy(self, input, target):
return torch.mean(-torch.sum(target * torch.log(input), 1))
def forward(self, logits, targets):
targets = targets.contiguous().view(-1)
loss = 0
for n_step, n_logits in enumerate(logits):
n_logits = F.softmax(n_logits, dim=-1)
n_target = targets[n_step:]
n_target_dist = torch.zeros_like(n_logits)
if len(n_target) != 0:
n_target_dist[n_target] = 1 / len(n_target)
loss += self.cross_entropy(n_logits[None,...], n_target_dist[None, ...])
return loss / len(logits)
class TruncatedLoss(nn.Module):
def __init__(self, q=0.8, k=0.2, trainset_size=50000):
super(TruncatedLoss, self).__init__()
self.q = q
self.k = k
self.weight = torch.nn.Parameter(data=torch.ones(trainset_size, 1), requires_grad=False)
def forward(self, logits, targets, indexes):
p = F.softmax(logits, dim=-1)
Yg = torch.gather(p, 2, targets.unsqueeze(2))
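# Yg: predicted probability of the ground-truth class at each position. The loss below is the
# truncated Lq term (1 - Yg**q) / q offset by its value at the threshold k, both scaled by a per-example weight.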
loss = ((1-(Yg**self.q))/self.q)*self.weight[indexes] - ((1-(self.k**self.q))/self.q)*self.weight[indexes]
loss = torch.mean(loss)
return loss
def update_weight(self, logits, targets, indexes):
p = F.softmax(logits, dim=-1)
Yg = torch.gather(p, 2, targets.unsqueeze(2))
Lq = ((1-(Yg**self.q))/self.q)
Lqk = np.repeat(((1-(self.k**self.q))/self.q), targets.size(0))
Lqk = torch.from_numpy(Lqk).type(torch.cuda.FloatTensor)
Lqk = torch.unsqueeze(Lqk, 1)
condition = torch.gt(Lqk, Lq)
self.weight[indexes] = condition.type(torch.cuda.FloatTensor)
# class PSTHLOSS(nn.Module):
# def __init__(self):
# super().__init__()
# def forward(self, logits, targets):
# total_logits = torch.sum(logits, dim=-2) # sum over sequence dimension
# probs = F.softmax(total_logits, dim=-1)
# outptu
class HungarianMatcher(nn.Module):
def __init__(self):
super().__init__()
@torch.no_grad()
def forward(self, logits, targets):
T, C = logits.size()
probs = F.softmax(logits, dim=-1)
cost_id = (1 - probs[:, targets]).cpu().view(T, -1).unsqueeze(0)
indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_id.split(len(targets), -1))]
return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
class KLDivLoss(nn.Module):
def __init__(self):
super().__init__()
self.log_softmax = nn.LogSoftmax(dim=-1)
self.KLdiv = nn.KLDivLoss()
def forward(self, logits, targets):
log_probs = self.log_softmax(logits)
return self.KLdiv(log_probs.long(), targets)
class PoissonCrossEntropyLoss(nn.Module):
def __init__(self):
super().__init__()
self.log_softmax = nn.LogSoftmax(dim=-1)
# self.softmax = nn.Softmax(dim=-1)
self.nll_poisson = nn.PoissonNLLLoss()
# self.nll_poisson = nn.NLLLoss()
def forward(self, logits, targets):
log_probs = self.log_softmax(logits)
return self.nll_poisson(log_probs, targets)
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, config):
super().__init__()
self.device = 'cpu'
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.config = config
# input embedding stem
self.n_embd = config.n_embd
self.tok_emb = nn.Embedding(config.id_vocab_size, config.n_embd)
self.pos_emb = PositionalEmbedding(config.n_embd, p_drop=0.2)
# self.pos_emb_id = nn.Parameter(torch.zeros(1, config.id_block_size, config.n_embd))
self.pos_emb_frames = nn.Parameter(torch.zeros(1, config.frame_block_size, config.n_embd))
# self.temp_emb = TemporalEmbedding(config.n_embd, p_drop=0.2)
# self.temp_emb = RotaryTemporalEmbedding(config.id_block_size)
self.temp_emb = LearntTemporalEmbedding(config.id_block_size, config.n_embd)
self.frame_temp_emb = LearntTemporalEmbedding(config.frame_block_size, config.n_embd)
self.id_drop = nn.Dropout(config.id_drop)
self.im_drop = nn.Dropout(config.im_drop)
self.drop = nn.Dropout(config.embd_pdrop)
# -- Visual Backbone -- #
# self.visual_backbone = VideoFeaturesExtractor()
self.video_encoder = VideoEncoder()
frame_temp_emb = torch.tensor(list(itertools.chain(*[[n * 0.05] * (config.frame_block_size//20) for n in range(20)]))).unsqueeze(0)
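# 20 evenly spaced time stamps, 0.05 apart (presumably seconds), each repeated for
# frame_block_size // 20 consecutive frame tokens, giving one temporal index per frame token.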
self.register_buffer("frame_temp_emb_seq", frame_temp_emb)
# -- Contrastive Loss -- ##
# self.proj_id = ProjectNorm(config.n_embd, config.n_embd)
# self.proj_vid = VidProjectNorm(config.n_embd, config.n_embd) # im_shape
## -- IM_Decoder -- ##
# self.blocks_id = BlockSequential(*[Block(config) for _ in range(2)])
# self.blocks_im = BlockSequential(*[Block(config) for _ in range(2)])
# self.ln_f_id = nn.LayerNorm(config.n_embd)
# self.ln_f_im = nn.LayerNorm(config.n_embd)
## -- Decoder -- ##
# self.ln_f = nn.LayerNorm(config.n_embd)
## GPT
# self.blocks = BlockSequential(*[Block(config) for _ in range(config.n_layer)])
# self.ln_f = nn.LayerNorm(config.n_embd)
## enc_dec
self.state_decoder = Decoder(config)
self.ln_f_state_dec = nn.LayerNorm(config.n_embd)
self.stimulus_decoder = Decoder(config)
self.ln_f_stimulus_dec = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
## -- Time -- ##
# self.proj_time = TimeProjection(config.block_size, config.id_block_size, config.n_embd, config.n_dt)
self.proj_time = ProjectNorm(config.n_embd, config.n_dt)
# self.proj_time = ProjectNorm(config.n_embd, 1)
## -- PSTH -- ##
# self.proj_psth = PSTHProjection(config)
# Loss
# self.dice_loss = DiceLossPSTH()
# self.poisson_loss = PoissonCrossEntropyLoss()
# self.hungarian_matcher = HungarianMatcher()
# self.kldiv_loss = KLDivLoss()
# self.truncated_loss = TruncatedLoss(trainset_size=config.data_size)
# self.set_loss = SetLoss()
# self.a = torch.tensor(0.5, requires_grad=True)
self.block_size = config.block_size
self.apply(self._init_weights)
if config.class_weights is not None:
for key in config.class_weights.keys():
self.register_buffer(f"class_weights_{key}", config.class_weights[key])
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def configure_optimizers(self, train_config):
"""
Separates parameters into those that will experience weight decay and those that will not
"""
if train_config.decay_weights:
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
else: no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
black_list_mods = ['pos_emb', 'temp_emb']
for mods in black_list_mods:
for name, param in self.named_parameters():
if mods in name:
no_decay.add(name) # also pos_emb
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.named_parameters()}
no_decay -= decay & no_decay
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
else:
parameters = self.parameters()
optimizer = torch.optim.Adam(parameters, lr=train_config.learning_rate)
return optimizer
def process_features(self, x):
# batch, block_size, feature
p_idx = x['id_prev']
idx = x['id']
dtx = x['dt']
dtx_prev = x['dt_prev']
frames = self.video_encoder(x['frames'])
pad = x['pad']
b, t = idx.size()
# b_p, t_p = p_idx.size()
bf, tf = frames.size()[0:2]
# forward the GPT model
'''
Positional and temporal embeddings are implemented in multiple ways: learnt,
Fourier decomposition and, in the case of time, just passed as is.
'''
# # Embeddings
prev_id_position_embeddings = self.pos_emb(p_idx)
prev_id_temporal_embeddings = self.temp_emb(dtx_prev.float())
id_position_embeddings = self.pos_emb(idx)
im_position_embeddings = self.pos_emb_frames
temporal_embeddings = self.temp_emb(dtx.float())
# Extract ID features
prev_token_embeddings = self.id_drop(self.tok_emb(p_idx) + prev_id_temporal_embeddings + prev_id_position_embeddings)
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
token_embeddings = token_embeddings + temporal_embeddings + id_position_embeddings
token_embeddings = self.id_drop(token_embeddings)
# Extract image features and add time embeddings
im_temporal_embeddings = self.frame_temp_emb(self.frame_temp_emb_seq)
im_embeddings = frames # self.tok_emb(frames)
im_embeddings = im_embeddings + im_position_embeddings + im_temporal_embeddings
im_embeddings = self.im_drop(im_embeddings) # separate pos emb?
# Tidy up
features = dict()
features['id_prev'] = prev_token_embeddings
features['id'] = token_embeddings
features['frames'] = im_embeddings
return features, pad
def perceiver(self, features, pad):
x = self.state_decoder(tgt=features['id'], memory=features['id_prev'], pad=pad)
x = self.ln_f_state_dec(x)
x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
x = self.ln_f_stimulus_dec(x)
logits = self.head(x)
return logits, x
def enc_dec(self, features, pad):
x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
x = self.ln_f_stimulus_dec(x)
logits = self.head(x)
return logits, x
def GPTdecoder(self, features, pad, dtx=None):
# image + neural features
x = torch.cat((features['frames'], features['id']), dim=1)
# Decoder
x = self.blocks(x, pad, dtx) # (B, T, C)
x = self.ln_f(x)
logits = self.head(x)
# print(logits.shape) # (B, T, Vocab)
# logits_psth = x[:, -1] # (B, C)
return logits, x
def forward(self, x, targets=None):
idx = x['id']
dtx = x['dt']
frames = x['frames']
pad = x['pad']
b, t = idx.size()
# b, t = x['id'].shape[0], x['id'].shape[1] + x['id_prev'].shape[1]
bf, tf = frames.size()[0:2]
tf = self.config.frame_block_size
# assert t + tf == self.config.block_size, f"{tf} {t}"
# assert t <= self.block_size, "Cannot forward, model block size is exhausted"
features, pad = self.process_features(x)
logits, x = self.perceiver(features, pad)
# logits, x = self.enc_dec(features, pad)
# logits, x = self.GPTdecoder(features, pad)
time = self.proj_time(x) # (B, T_id, 1)
# print(x[:, 0].shape)
# psth = self.proj_psth(x) # (B, Vocab_id)
# if targets, calculate loss
# calculate loss on logits up to padding token for each batch
loss = None
loss_frames = 0
loss_id = []
loss_time = []
loss_dice = []
loss_psth = []
loss_hungarian = []
if targets is not None:
# loss_psth = self.dice_loss(psth, targets['modes'][:, tf:])
for B, P in enumerate(pad):
tf = 0
# im_logits = logits[B, :tf]
# im_targets = targets['frames'][B, :tf]
# loss_frames += F.cross_entropy(im_logits.view(-1, im_logits.size(-1)), im_targets.view(-1))
id_logits = logits[B, tf:tf + t - P]
id_targets = targets['id'][B, :t - P]
loss_id_ = F.cross_entropy(id_logits.view(-1, id_logits.size(-1)), id_targets.view(-1), weight=self.class_weights_id)
# if self.config.epoch >= 15:
# self.truncated_loss.update_weight(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
# loss_id_ = self.truncated_loss(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
time_preds = time[B, :t - P]
time_targets = targets['dt'][B, :t - P]
loss_time_ = F.cross_entropy(time_preds.view(-1, time_preds.size(-1)), time_targets.view(-1), weight=self.class_weights_dt)
# loss_time_ = F.mse_loss(time_preds.squeeze(-1), time_targets)
# loss_id_ = self.poisson_loss(id_logits.view(-1, id_logits.size(-1)), F.one_hot(id_targets, self.config.vocab_size))
# if len(id_targets) > 0:
# indices = self.hungarian_matcher(id_logits, id_targets)
# probs_matching, targets_matching = id_logits[indices[0][0]], id_targets[indices[0][1]]
# loss_hungarian_ = F.cross_entropy(probs_matching, targets_matching, weight=self.class_weights).to(self.device)
# loss_hungarian.append(loss_hungarian_)
# # psth = self.proj_psth(x[B, -1]) # from the EOS position
# loss_psth.append(torch.nan_to_num(self.set_loss(id_logits, id_targets)))
# loss_psth_ = self.dice_loss(id_logits, id_targets)
# loss_psth.append(torch.nan_to_num(loss_psth_))
loss_time.append(torch.nan_to_num(loss_time_))
loss_id.append(torch.nan_to_num(loss_id_))
loss = dict()
# loss['frames'] = loss_frames / (b / 3)
loss['id'] = sum(loss_id) / (b * 2) # sum(loss_id) / (b * 2) # / len(loss_id)
loss['time'] = sum(loss_time) / (b * 2)
# loss['dice'] = sum(loss_dice) / len(loss_dice)
# loss['dt'] = loss_time / (b * 50)
# loss['hungarian'] = sum(loss_hungarian) / (b * 2)
# loss['psth'] = sum(loss_psth) / (b * 2)
for key in list(loss):
if isinstance(loss[key], float):
del loss[key]
preds = dict()
preds['id'] = logits # [:, tf:] # only id logits
preds['dt'] = time
return preds, features, loss | [
"torch.nn.Linear",
"torch.cat",
"torch.optim.AdamW",
"torch.gt",
"torch.cuda.current_device",
"torch.ones",
"torch.cuda.is_available",
"torch.sum",
"torch.nn.PoissonNLLLoss",
"torch.topk",
"torch.nn.LayerNorm",
"torch.nan_to_num",
"torch.unsqueeze",
"torch.tensor",
"torch.nn.KLDivLoss",
"torch.zeros_like",
"torch.as_tensor",
"torch.zeros",
"torch.cos",
"torch.nn.ReLU",
"torch.nn.functional.softmax",
"torch.mean",
"torch.log",
"torch.nn.GELU",
"torch.nn.LogSoftmax",
"torch.rand",
"torch.nn.Dropout",
"torch.sin",
"torch.nn.Transformer",
"torch.arange",
"torch.no_grad",
"torch.optim.Adam",
"torch.from_numpy",
"torch.all",
"torch.nn.Embedding"
] | 0.20.1 | woanderer/neuroformer | df3462d55977b6c9adcb6753e7c474b8b76e8021 |
1.1 | import torch.nn as nn
from .base import BaseLM
class IpaLM(BaseLM):
name = 'lstm'
def __init__(self, vocab_size, hidden_size, nlayers=1, dropout=0.1, embedding_size=None, **kwargs):
super().__init__(
vocab_size, hidden_size, nlayers=nlayers, dropout=dropout, embedding_size=embedding_size, **kwargs)
self.embedding = nn.Embedding(vocab_size, self.embedding_size)
self.lstm = nn.LSTM(
self.embedding_size, hidden_size, nlayers, dropout=(dropout if nlayers > 1 else 0), batch_first=True)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(hidden_size, vocab_size)
def forward(self, x, idx):
h_old = self.context(idx)
x_emb = self.dropout(self.get_embedding(x))
c_t, h_t = self.lstm(x_emb, h_old)
c_t = self.dropout(c_t).contiguous()
logits = self.out(c_t)
return logits, h_t
def get_embedding(self, x):
return self.embedding(x)
def initHidden(self, bsz=1):
weight = next(self.parameters()).data
return weight.new(self.nlayers, bsz, self.hidden_size).zero_(), \
weight.new(self.nlayers, bsz, self.hidden_size).zero_()
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.nn.Embedding"
] | 1.1.0 | tpimentelms/meaning2form | 624b3947b3ac2a7a521cf35c762fb56508236f74 |
1.10 | """Model Predictive Control with a Gaussian Process model.
Based on:
* L. Hewing, J. Kabzan and M. N. Zeilinger, "Cautious Model Predictive Control Using Gaussian Process Regression,"
in IEEE Transactions on Control Systems Technology, vol. 28, no. 6, pp. 2736-2743, Nov. 2020, doi: 10.1109/TCST.2019.2949757.
Implementation details:
1. The previous time step MPC solution is used to compute the set constraints and GP dynamics rollout.
Here, the dynamics are rolled out using the Mean Equivalence method, the fastest, but least accurate.
2. The GP is approximated using the Fully Independent Training Conditional (FITC) outlined in
* J. Quinonero-Candela, C. E. Rasmussen, and R. Herbrich, “A unifying view of sparse approximate Gaussian process regression,”
Journal of Machine Learning Research, vol. 6, pp. 1935–1959, 2005.
https://www.jmlr.org/papers/volume6/quinonero-candela05a/quinonero-candela05a.pdf
* E. Snelson and Z. Ghahramani, “Sparse gaussian processes using pseudo-inputs,” in Advances in Neural Information Processing
Systems, Y. Weiss, B. Scholkopf, and J. C. Platt, Eds., 2006, pp. 1257–1264.
and the inducing points are the previous MPC solution.
3. Each dimension of the learned error dynamics is an independent Zero Mean SE Kernel GP.
"""
import scipy
import numpy as np
import casadi as cs
import time
import torch
import gpytorch
from copy import deepcopy
from skopt.sampler import Lhs
from functools import partial
from sklearn.model_selection import train_test_split
from safe_control_gym.controllers.mpc.linear_mpc import LinearMPC, MPC
from safe_control_gym.controllers.mpc.mpc_utils import discretize_linear_system
from safe_control_gym.controllers.mpc.gp_utils import GaussianProcessCollection, ZeroMeanIndependentGPModel, covSEard
from safe_control_gym.envs.benchmark_env import Task
class GPMPC(MPC):
"""MPC with Gaussian Process as dynamics residual.
"""
def __init__(
self,
env_func,
seed: int = 1337,
horizon: int = 5,
q_mpc: list = [1],
r_mpc: list = [1],
additional_constraints: list = None,
use_prev_start: bool = True,
train_iterations: int = 800,
validation_iterations: int = 200,
optimization_iterations: list = None,
learning_rate: list = None,
normalize_training_data: bool = False,
use_gpu: bool = False,
gp_model_path: str = None,
prob: float = 0.955,
initial_rollout_std: float = 0.005,
input_mask: list = None,
target_mask: list = None,
gp_approx: str = 'mean_eq',
sparse_gp: bool = False,
online_learning: bool = False,
inertial_prop: list = [1.0],
prior_param_coeff: float = 1.0,
output_dir: str = "results/temp",
**kwargs
):
"""Initialize GP-MPC.
Args:
env_func (gym.Env): functionalized initialization of the environment.
seed (int): random seed.
horizon (int): MPC planning horizon.
Q, R (np.array): cost weight matrix.
use_prev_start (bool): Warmstart mpc with the previous solution.
train_iterations (int): the number of training examples to use for each dimension of the GP.
validation_iterations (int): the number of points to use for the test set during training.
optimization_iterations (list): the number of optimization iterations for each dimension of the GP.
learning_rate (list): the learning rate for training each dimension of the GP.
normalize_training_data (bool): Normalize the training data.
use_gpu (bool): use GPU while training the gp.
gp_model_path (str): path to a pretrained GP model. If None, will train a new one.
output_dir (str): directory to store model and results.
prob (float): desired probabilistic safety level.
initial_rollout_std (float): the initial std (across all states) for the mean_eq rollout.
inertial_prop (list): to initialize the inertial properties of the prior model.
prior_param_coeff (float): constant multiplying factor to adjust the prior model inertial properties.
input_mask (list): list of which input dimensions to use in GP model. If None, all are used.
target_mask (list): list of which output dimensions to use in the GP model. If None, all are used.
gp_approx (str): 'mean_eq' uses mean-equivalence rollout for the GP dynamics; currently the only option that works.
online_learning (bool): if true, GP kernel values will be updated using past trajectory values.
additional_constraints (list): list of Constraint objects defining additional constraints to be used.
"""
print("############################################### GP-MPC hexa ###########################################")
self.prior_env_func = partial(env_func,
inertial_prop=np.array(inertial_prop)*prior_param_coeff)
self.prior_param_coeff = prior_param_coeff
# Initialize the method using linear MPC.
self.prior_ctrl = LinearMPC(
self.prior_env_func,
horizon=horizon,
q_mpc=q_mpc,
r_mpc=r_mpc,
use_prev_start=use_prev_start,
output_dir=output_dir,
additional_constraints=additional_constraints,
)
self.prior_ctrl.reset()
super().__init__(
self.prior_env_func,
horizon=horizon,
q_mpc=q_mpc,
r_mpc=r_mpc,
use_prev_start=use_prev_start,
output_dir=output_dir,
additional_constraints=additional_constraints,
**kwargs)
# Setup environments.
self.env_func = env_func
self.env = env_func(randomized_init=False)
self.env_training = env_func(randomized_init=True)
# No training data accumulated yet so keep the dynamics function as linear prior.
self.train_data = None
self.prior_dynamics_func = self.prior_ctrl.linear_dynamics_func
# GP and training parameters.
self.gaussian_process = None
self.train_iterations = train_iterations
self.validation_iterations = validation_iterations
self.optimization_iterations = optimization_iterations
self.learning_rate = learning_rate
self.gp_model_path = gp_model_path
self.normalize_training_data = normalize_training_data
self.use_gpu = use_gpu
self.seed = seed
self.prob = prob
self.sparse_gp = sparse_gp
if input_mask is None:
self.input_mask = np.arange(self.model.nx + self.model.nu).tolist()
else:
self.input_mask = input_mask
if target_mask is None:
self.target_mask = np.arange(self.model.nx).tolist()
else:
self.target_mask = target_mask
Bd = np.eye(self.model.nx)
self.Bd = Bd[:, self.target_mask]
self.gp_approx = gp_approx
self.online_learning = online_learning
self.last_obs = None
self.last_action = None
self.initial_rollout_std = initial_rollout_std
def setup_prior_dynamics(self):
"""Computes the LQR gain used for propograting GP uncertainty from the prior model dynamics.
"""
# Determine the LQR gain K to propagate the input uncertainty (doing this at each timestep would increase complexity).
A, B = discretize_linear_system(self.prior_ctrl.dfdx, self.prior_ctrl.dfdu, self.dt)
Q_lqr = self.Q
R_lqr = self.R
P = scipy.linalg.solve_discrete_are(A, B, Q_lqr, R_lqr)
btp = np.dot(B.T, P)
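# Discrete-time LQR gain: K = -(R + B^T P B)^{-1} B^T P A, with P the solution of the discrete ARE above.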
self.lqr_gain = -np.dot(np.linalg.inv(self.R + np.dot(btp, B)), np.dot(btp, A))
self.discrete_dfdx = A
self.discrete_dfdu = B
def set_gp_dynamics_func(self):
"""Updates symbolic dynamics.
With actual control frequency, initialize GP model and add to the combined dynamics.
"""
self.setup_prior_dynamics()
# Compute the probabilistic constraint inverse CDF according to section III.D.b in Hewing 2019.
self.inverse_cdf = scipy.stats.norm.ppf(1 - (1/self.model.nx - (self.prob + 1)/(2*self.model.nx)))
self.create_sparse_GP_machinery()
def create_sparse_GP_machinery(self):
"""This setups the gaussian process approximations for FITC formulation.
"""
lengthscales, signal_var, noise_var, gp_K_plus_noise = self.gaussian_process.get_hyperparameters(as_numpy=True)
self.length_scales = lengthscales.squeeze()
self.signal_var = signal_var.squeeze()
self.noise_var = noise_var.squeeze()
self.gp_K_plus_noise = gp_K_plus_noise
Nx = len(self.input_mask)
Ny = len(self.target_mask)
N = self.gaussian_process.n_training_samples
# Create CasADI function for computing the kernel K_z_zind with parameters for z, z_ind, length scales and signal variance.
# We need the CasADi version of this so that it can be symbolically differentiated in the MPC optimization.
z1 = cs.SX.sym('z1', Nx)
z2 = cs.SX.sym('z2', Nx)
ell_s = cs.SX.sym('ell', Nx)
sf2_s = cs.SX.sym('sf2')
z_ind = cs.SX.sym('z_ind', self.T, Nx)
covSE = cs.Function('covSE', [z1, z2, ell_s, sf2_s],
[covSEard(z1, z2, ell_s, sf2_s)])
ks = cs.SX.zeros(1, self.T)
for i in range(self.T):
ks[i] = covSE(z1, z_ind[i, :], ell_s, sf2_s)
ks_func = cs.Function('K_s', [z1, z_ind, ell_s, sf2_s], [ks])
K_z_zind = cs.SX.zeros(Ny, self.T)
for i in range(Ny):
K_z_zind[i,:] = ks_func(z1, z_ind, self.length_scales[i,:], self.signal_var[i])
# This will be multiplied by the mean_post_factor computed at every time step to compute the approximate mean.
self.K_z_zind_func = cs.Function('K_z_zind', [z1, z_ind],[K_z_zind],['z1', 'z2'],['K'])
def preprocess_training_data(self,
x_seq,
u_seq,
x_next_seq
):
"""Converts trajectory data for GP trianing.
Args:
x_seq (list): state sequence of np.array (nx,).
u_seq (list): action sequence of np.array (nu,).
x_next_seq (list): next state sequence of np.array (nx,).
Returns:
np.array: inputs for GP training, (N, nx+nu).
np.array: targets for GP training, (N, nx).
"""
# Get the predicted dynamics. This is a linear prior, so we need to account for the fact that
# it is linearized about an equilibrium point using self.X_GOAL and self.U_GOAL.
x_pred_seq = self.prior_dynamics_func(x0=x_seq.T - self.prior_ctrl.X_LIN[:, None],
p=u_seq.T - self.prior_ctrl.U_LIN[:,None])['xf'].toarray()
targets = (x_next_seq.T - (x_pred_seq+self.prior_ctrl.X_LIN[:,None])).transpose() # (N, nx).
inputs = np.hstack([x_seq, u_seq]) # (N, nx+nu).
return inputs, targets
def precompute_probabilistic_limits(self,
print_sets=True
):
"""This updates the constraint value limits to account for the uncertainty in the dynamics rollout.
Args:
print_sets (bool): True to print out the sets for debugging purposes.
"""
nx, nu = self.model.nx, self.model.nu
T = self.T
state_covariances = np.zeros((self.T+1, nx, nx))
input_covariances = np.zeros((self.T, nu, nu))
# Initialize lists for the tightening of each constraint.
state_constraint_set = []
for state_constraint in self.constraints.state_constraints:
state_constraint_set.append(np.zeros((state_constraint.num_constraints, T+1)))
input_constraint_set = []
for input_constraint in self.constraints.input_constraints:
input_constraint_set.append(np.zeros((input_constraint.num_constraints, T)))
if self.x_prev is not None and self.u_prev is not None:
cov_x = np.diag([self.initial_rollout_std**2]*nx)
for i in range(T):
state_covariances[i] = cov_x
cov_u = self.lqr_gain @ cov_x @ self.lqr_gain.T
input_covariances[i] = cov_u
cov_xu = cov_x @ self.lqr_gain.T
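# Under the LQR feedback u = K x, the input covariance is K cov_x K^T and the
# state-input cross-covariance is cov_x K^T (used in the mean-equivalence propagation below).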
z = np.hstack((self.x_prev[:,i], self.u_prev[:,i]))
if self.gp_approx == 'taylor':
raise NotImplementedError("Taylor GP approximation is currently not working.")
elif self.gp_approx == 'mean_eq':
_, cov_d_tensor = self.gaussian_process.predict(z[None,:], return_pred=False)
cov_d = cov_d_tensor.detach().numpy()
else:
raise NotImplementedError('gp_approx method is incorrect or not implemented')
# Loop through input constraints and tighten by the required amount.
for ui, input_constraint in enumerate(self.constraints.input_constraints):
input_constraint_set[ui][:, i] = -1*self.inverse_cdf * \
np.absolute(input_constraint.A) @ np.sqrt(np.diag(cov_u))
for si, state_constraint in enumerate(self.constraints.state_constraints):
state_constraint_set[si][:, i] = -1*self.inverse_cdf * \
np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x))
if self.gp_approx == 'taylor':
raise NotImplementedError("Taylor GP rollout not implemented.")
elif self.gp_approx == 'mean_eq':
# Compute the next-step propagated state covariance using mean equivalence.
cov_x = self.discrete_dfdx @ cov_x @ self.discrete_dfdx.T + \
self.discrete_dfdx @ cov_xu @ self.discrete_dfdu.T + \
self.discrete_dfdu @ cov_xu.T @ self.discrete_dfdx.T + \
self.discrete_dfdu @ cov_u @ self.discrete_dfdu.T + \
self.Bd @ cov_d @ self.Bd.T
else:
raise NotImplementedError('gp_approx method is incorrect or not implemented')
# Update final covariance.
for si, state_constraint in enumerate(self.constraints.state_constraints):
state_constraint_set[si][:,-1] = -1 * self.inverse_cdf * \
np.absolute(state_constraint.A) @ np.sqrt(np.diag(cov_x))
state_covariances[-1] = cov_x
if print_sets:
print("Probabilistic State Constraint values along Horizon:")
print(state_constraint_set)
print("Probabilistic Input Constraint values along Horizon:")
print(input_constraint_set)
self.results_dict['input_constraint_set'].append(input_constraint_set)
self.results_dict['state_constraint_set'].append(state_constraint_set)
self.results_dict['state_horizon_cov'].append(state_covariances)
self.results_dict['input_horizon_cov'].append(input_covariances)
return state_constraint_set, input_constraint_set
def precompute_sparse_gp_values(self):
"""Uses the last MPC solution to precomupte values associated with the FITC GP approximation.
"""
n_data_points = self.gaussian_process.n_training_samples
dim_gp_inputs = len(self.input_mask)
dim_gp_outputs = len(self.target_mask)
inputs = self.train_data['train_inputs']
targets = self.train_data['train_targets']
# Get the inducing points.
if self.x_prev is not None and self.u_prev is not None:
# Use the previous MPC solution as in Hewing 2019.
z_ind = np.hstack((self.x_prev[:,:-1].T, self.u_prev.T))
z_ind = z_ind[:,self.input_mask]
else:
# If there is no previous solution, choose T random training set points.
inds = self.env.np_random.choice(range(n_data_points), size=self.T)
#z_ind = self.data_inputs[inds][:, self.input_mask]
z_ind = inputs[inds][:, self.input_mask]
K_zind_zind = self.gaussian_process.kernel(torch.Tensor(z_ind).double())
K_zind_zind_inv = self.gaussian_process.kernel_inv(torch.Tensor(z_ind).double())
K_x_zind = self.gaussian_process.kernel(torch.from_numpy(inputs[:, self.input_mask]).double(),
torch.Tensor(z_ind).double())
Q_X_X = K_x_zind @ K_zind_zind_inv @ K_x_zind.transpose(1,2)
Gamma = torch.diagonal(self.gaussian_process.K_plus_noise + Q_X_X, 0, 1, 2)
Gamma_inv = torch.diag_embed(1/Gamma)
Sigma = torch.pinverse(K_zind_zind + K_x_zind.transpose(1,2) @ Gamma_inv @ K_x_zind)
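# Sparse-GP factors used for the FITC-style approximation: Q_XX = K_xz Kzz^{-1} Kzx (Nystrom term),
# Gamma the diagonal of (K_plus_noise + Q_XX) treated as a per-point variance, and
# Sigma = (Kzz + Kzx Gamma^{-1} Kxz)^{-1}; mean_post_factor below weights the inducing points.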
mean_post_factor = torch.zeros((dim_gp_outputs, self.T))
for i in range(dim_gp_outputs):
mean_post_factor[i] = Sigma[i] @ K_x_zind[i].T @ Gamma_inv[i] @ \
torch.from_numpy(targets[:,self.target_mask[i]]).double()
return mean_post_factor.detach().numpy(), Sigma.detach().numpy(), K_zind_zind_inv.detach().numpy(), z_ind
def setup_gp_optimizer(self):
"""Sets up nonlinear optimization problem including cost objective, variable bounds and dynamics constraints.
"""
nx, nu = self.model.nx, self.model.nu
T = self.T
# Define optimizer and variables.
opti = cs.Opti()
# States.
x_var = opti.variable(nx, T + 1)
# Inputs.
u_var = opti.variable(nu, T)
# Initial state.
x_init = opti.parameter(nx, 1)
# Reference (equilibrium point or trajectory, last step for terminal cost).
x_ref = opti.parameter(nx, T + 1)
# Chance constraint limits.
state_constraint_set = []
for state_constraint in self.constraints.state_constraints:
state_constraint_set.append(opti.parameter(state_constraint.num_constraints, T+1))
input_constraint_set = []
for input_constraint in self.constraints.input_constraints:
input_constraint_set.append(opti.parameter(input_constraint.num_constraints, T))
# Sparse GP mean postfactor matrix.
mean_post_factor = opti.parameter(len(self.target_mask), T)
# Sparse GP inducing points.
z_ind = opti.parameter(T, len(self.input_mask))
# Cost (cumulative).
cost = 0
cost_func = self.model.loss
for i in range(T):
cost += cost_func(x=x_var[:, i],
u=u_var[:, i],
Xr=x_ref[:, i],
Ur=np.zeros((nu, 1)),
Q=self.Q,
R=self.R)["l"]
# Terminal cost.
cost += cost_func(x=x_var[:, -1],
u=np.zeros((nu, 1)),
Xr=x_ref[:, -1],
Ur=np.zeros((nu, 1)),
Q=self.Q,
R=self.R)["l"]
opti.minimize(cost)
z = cs.vertcat(x_var[:,:-1], u_var)
z = z[self.input_mask,:]
for i in range(self.T):
# Dynamics constraints using the dynamics of the prior and the mean of the GP.
# This follows the tractable dynamics formulation in Section III.B in Hewing 2019.
# Note that for the GP approximation, we are purposely using elementwise multiplication *.
if self.sparse_gp:
next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None],
p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \
self.prior_ctrl.X_LIN[:,None]+ self.Bd @ cs.sum2(self.K_z_zind_func(z1=z[:,i].T, z2=z_ind)['K'] * mean_post_factor)
else:
# The sparse GP approximation doesn't always work well, so use exact GP regression instead. This is much slower,
# but for unstable systems it makes performance much better.
next_state = self.prior_dynamics_func(x0=x_var[:, i]-self.prior_ctrl.X_LIN[:,None],
p=u_var[:, i]-self.prior_ctrl.U_LIN[:,None])['xf'] + \
self.prior_ctrl.X_LIN[:,None]+ self.Bd @ self.gaussian_process.casadi_predict(z=z[:,i])['mean']
opti.subject_to(x_var[:, i + 1] == next_state)
# Probabilistic state and input constraints according to Hewing 2019 constraint tightening.
for s_i, state_constraint in enumerate(self.state_constraints_sym):
opti.subject_to(state_constraint(x_var[:, i]) <= state_constraint_set[s_i][:,i])
for u_i, input_constraint in enumerate(self.input_constraints_sym):
opti.subject_to(input_constraint(u_var[:, i]) <= input_constraint_set[u_i][:,i])
# Final state constraints.
for s_i, state_constraint in enumerate(self.state_constraints_sym):
opti.subject_to(state_constraint(x_var[:, -1]) <= state_constraint_set[s_i][:,-1])
# Initial condition constraints.
opti.subject_to(x_var[:, 0] == x_init)
# Create solver (IPOPT solver in this version).
opts = {"ipopt.print_level": 4,
"ipopt.sb": "yes",
"ipopt.max_iter": 100, #100,
"print_time": 1}
opti.solver('ipopt', opts)
self.opti_dict = {
"opti": opti,
"x_var": x_var,
"u_var": u_var,
"x_init": x_init,
"x_ref": x_ref,
"state_constraint_set": state_constraint_set,
"input_constraint_set": input_constraint_set,
"mean_post_factor": mean_post_factor,
"z_ind": z_ind,
"cost": cost
}
def select_action_with_gp(self,
obs
):
"""Solves nonlinear MPC problem to get next action.
Args:
obs (np.array): current state/observation.
Returns:
np.array: input/action to the task/env.
"""
opti_dict = self.opti_dict
opti = opti_dict["opti"]
x_var = opti_dict["x_var"]
u_var = opti_dict["u_var"]
x_init = opti_dict["x_init"]
x_ref = opti_dict["x_ref"]
state_constraint_set = opti_dict["state_constraint_set"]
input_constraint_set = opti_dict["input_constraint_set"]
mean_post_factor = opti_dict["mean_post_factor"]
z_ind = opti_dict["z_ind"]
cost = opti_dict["cost"]
# Assign the initial state.
opti.set_value(x_init, obs)
# Assign reference trajectory within horizon.
goal_states = self.get_references()
opti.set_value(x_ref, goal_states)
if self.mode == "tracking":
self.traj_step += 1
# Set the probabilistic state and input constraint set limits.
state_constraint_set_prev, input_constraint_set_prev = self.precompute_probabilistic_limits()
for si in range(len(self.constraints.state_constraints)):
opti.set_value(state_constraint_set[si], state_constraint_set_prev[si])
for ui in range(len(self.constraints.input_constraints)):
opti.set_value(input_constraint_set[ui], input_constraint_set_prev[ui])
mean_post_factor_val, Sigma, K_zind_zind_inv, z_ind_val = self.precompute_sparse_gp_values()
opti.set_value(mean_post_factor, mean_post_factor_val)
opti.set_value(z_ind, z_ind_val)
# Initial guess for the optimization problem.
if self.warmstart and self.x_prev is not None and self.u_prev is not None:
# shift previous solutions by 1 step
x_guess = deepcopy(self.x_prev)
u_guess = deepcopy(self.u_prev)
x_guess[:, :-1] = x_guess[:, 1:]
u_guess[:-1] = u_guess[1:]
opti.set_initial(x_var, x_guess)
opti.set_initial(u_var, u_guess)
# Solve the optimization problem.
try:
sol = opti.solve()
x_val, u_val = sol.value(x_var), sol.value(u_var)
except RuntimeError:
x_val, u_val = opti.debug.value(x_var), opti.debug.value(u_var)
u_val = np.atleast_2d(u_val)
self.x_prev = x_val
self.u_prev = u_val
self.results_dict['horizon_states'].append(deepcopy(self.x_prev))
self.results_dict['horizon_inputs'].append(deepcopy(self.u_prev))
zi = np.hstack((x_val[:,0], u_val[:,0]))
zi = zi[self.input_mask]
gp_contribution = np.sum(self.K_z_zind_func(z1=zi, z2=z_ind_val)['K'].toarray() * mean_post_factor_val,axis=1)
print("GP Mean eq Contribution: %s" % gp_contribution)
zi = np.hstack((x_val[:,0], u_val[:,0]))
pred, _, _ = self.gaussian_process.predict(zi[None,:])
print("True GP value: %s" % pred.numpy())
lin_pred = self.prior_dynamics_func(x0=x_val[:,0]-self.prior_ctrl.X_LIN,
p=u_val[:, 0]-self.prior_ctrl.U_LIN)['xf'].toarray() + \
self.prior_ctrl.X_LIN[:,None]
self.results_dict['linear_pred'].append(lin_pred)
self.results_dict['gp_mean_eq_pred'].append(gp_contribution)
self.results_dict['gp_pred'].append(pred.numpy())
# Take the first one from solved action sequence.
if u_val.ndim > 1:
action = u_val[:, 0]
else:
action = np.array([u_val[0]])
self.prev_action = action,
return action
def learn(self,
input_data=None,
target_data=None,
gp_model=None,
plot=False
):
"""Performs GP training.
Args:
input_data, target_data (optional, np.array): data to use for training
gp_model (str): if not None, this is the path to pretrained models to use instead of training new ones.
plot (bool): to plot validation trajectories or not.
Returns:
training_results (dict): Dictionary of the training results.
"""
if gp_model is None:
gp_model = self.gp_model_path
self.prior_ctrl.remove_constraints(self.prior_ctrl.additional_constraints)
self.reset()
if self.online_learning:
input_data = np.zeros((self.train_iterations, len(self.input_mask)))
target_data = np.zeros((self.train_iterations, len(self.target_mask)))
if input_data is None and target_data is None:
train_inputs = []
train_targets = []
train_info = []
############
# Use Latin Hypercube Sampling to generate states within environment bounds.
lhs_sampler = Lhs(lhs_type='classic', criterion='maximin')
limits = [(self.env.INIT_STATE_RAND_INFO[key].low, self.env.INIT_STATE_RAND_INFO[key].high) for key in
self.env.INIT_STATE_RAND_INFO]
# todo: parameterize this if we actually want it.
num_eq_samples = 0
samples = lhs_sampler.generate(limits,
self.train_iterations + self.validation_iterations - num_eq_samples,
random_state=self.seed)
# todo: choose if we want eq samples or not.
delta = 0.01
eq_limits = [(self.prior_ctrl.X_LIN[eq]-delta, self.prior_ctrl.X_LIN[eq]+delta) for eq in range(self.model.nx)]
if num_eq_samples > 0:
eq_samples = lhs_sampler.generate(eq_limits, num_eq_samples, random_state=self.seed)
#samples = samples.append(eq_samples)
init_state_samples = np.array(samples + eq_samples)
else:
init_state_samples = np.array(samples)
input_limits = np.vstack((self.constraints.input_constraints[0].lower_bounds,
self.constraints.input_constraints[0].upper_bounds)).T
input_samples = lhs_sampler.generate(input_limits,
self.train_iterations + self.validation_iterations,
random_state=self.seed)
input_samples = np.array(input_samples) # not being used currently
seeds = self.env.np_random.randint(0,99999, size=self.train_iterations + self.validation_iterations)
load_from_file = False
if load_from_file:
gpmpc_data = np.load("/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand_good1.npz")
x_seq_all = gpmpc_data["x_seq_all"]
x_next_seq_all = gpmpc_data["x_next_seq_all"]
u_seq_all = gpmpc_data["u_seq_all"]
else:
x_seq_all = []
u_seq_all = []
x_next_seq_all = []
for i in range(self.train_iterations + self.validation_iterations):
if load_from_file:
x_seq = x_seq_all[i]
x_next_seq = x_next_seq_all[i]
u_seq = u_seq_all[i]
else:
# For random initial state training.
init_state = init_state_samples[i,:]
# Collect data with prior controller.
run_env = self.env_func(init_state=init_state, randomized_init=False, seed=int(seeds[i]))
episode_results = self.prior_ctrl.run(env=run_env, max_steps=1, gp_training = True)
run_env.close()
x_obs = episode_results['obs'][-3:,:]
u_seq = episode_results['action'][-1:,:]
run_env.close()
x_seq = x_obs[:-1,:]
x_next_seq = x_obs[1:,:]
x_seq_all.append(x_seq)
x_next_seq_all.append(x_next_seq)
u_seq_all.append(u_seq)
train_inputs_i, train_targets_i = self.preprocess_training_data(x_seq, u_seq, x_next_seq)
train_inputs.append(train_inputs_i)
train_targets.append(train_targets_i)
np.savez("/home/erl/repos/journal_zhichao/safe-control-gym/experiments/annual_reviews/figure6/data/small_drone/statecontroldata_rand.npz", x_seq_all = x_seq_all, x_next_seq_all = x_next_seq_all, u_seq_all = u_seq_all)
###########
else:
train_inputs = input_data
train_targets = target_data
# assign all data
train_inputs = np.vstack(train_inputs)
train_targets = np.vstack(train_targets)
self.data_inputs = train_inputs
self.data_targets = train_targets
train_idx, test_idx = train_test_split(
#list(range(self.train_iterations + self.validation_iterations)),
list(range(train_inputs.shape[0])),
test_size=self.validation_iterations/(self.train_iterations+self.validation_iterations),
random_state=self.seed
)
train_inputs = self.data_inputs[train_idx, :]
train_targets = self.data_targets[train_idx, :]
self.train_data = {'train_inputs': train_inputs, 'train_targets': train_targets}
test_inputs = self.data_inputs[test_idx, :]
test_targets = self.data_targets[test_idx, :]
self.test_data = {'test_inputs': test_inputs, 'test_targets': test_targets}
train_inputs_tensor = torch.Tensor(train_inputs).double()
train_targets_tensor = torch.Tensor(train_targets).double()
test_inputs_tensor = torch.Tensor(test_inputs).double()
test_targets_tensor = torch.Tensor(test_targets).double()
if plot:
init_state = np.array([-1.0, 0.0, 0.0, 0.0, 0.0, 0.0])
valid_env = self.env_func(init_state=init_state,
randomized_init=False)
validation_results = self.prior_ctrl.run(env=valid_env,
max_steps=40)
valid_env.close()
x_obs = validation_results['obs']
u_seq = validation_results['action']
x_seq = x_obs[:-1, :]
x_next_seq = x_obs[1:, :]
# Define likelihood.
likelihood = gpytorch.likelihoods.GaussianLikelihood(
noise_constraint=gpytorch.constraints.GreaterThan(1e-6),
).double()
self.gaussian_process = GaussianProcessCollection(ZeroMeanIndependentGPModel,
likelihood,
len(self.target_mask),
input_mask=self.input_mask,
target_mask=self.target_mask,
normalize=self.normalize_training_data
)
if gp_model:
self.gaussian_process.init_with_hyperparam(train_inputs_tensor,
train_targets_tensor,
gp_model)
else:
# Train the GP.
self.gaussian_process.train(train_inputs_tensor,
train_targets_tensor,
test_inputs_tensor,
test_targets_tensor,
n_train=self.optimization_iterations,
learning_rate=self.learning_rate,
gpu=self.use_gpu,
dir=self.output_dir)
# Plot validation.
if plot:
validation_inputs, validation_targets = self.preprocess_training_data(x_seq, u_seq, x_next_seq)
fig_count = 0
fig_count = self.gaussian_process.plot_trained_gp(torch.Tensor(validation_inputs).double(),
torch.Tensor(validation_targets).double(),
fig_count=fig_count)
self.set_gp_dynamics_func()
self.setup_gp_optimizer()
self.prior_ctrl.add_constraints(self.prior_ctrl.additional_constraints)
self.prior_ctrl.reset()
# Collect training results.
training_results = {}
training_results['train_targets'] = train_targets
training_results['train_inputs'] = train_inputs
try:
training_results['info'] = train_info
except UnboundLocalError:
training_results['info'] = None
return training_results
def select_action(self,
obs
):
"""Select the action based on the given observation.
Args:
obs (np.array): current observed state.
Returns:
action (np.array): desired policy action.
"""
if self.gaussian_process is None:
action = self.prior_ctrl.select_action(obs)
else:
if(self.last_obs is not None and self.last_action is not None and self.online_learning):
print("[ERROR]: Not yet supported.")
exit()
t1 = time.perf_counter()
action = self.select_action_with_gp(obs)
t2 = time.perf_counter()
print("GP SELECT ACTION TIME: %s" %(t2 - t1))
self.last_obs = obs
self.last_action = action
return action
def close(self):
"""Clean up.
"""
self.env_training.close()
self.env.close()
def reset_results_dict(self):
"""
"""
"Result the results_dict before running."
super().reset_results_dict()
self.results_dict['input_constraint_set'] = []
self.results_dict['state_constraint_set'] = []
self.results_dict['state_horizon_cov'] = []
self.results_dict['input_horizon_cov'] = []
self.results_dict['gp_mean_eq_pred'] = []
self.results_dict['gp_pred'] = []
self.results_dict['linear_pred'] = []
def reset(self):
"""Reset the controller before running.
"""
# Setup reference input.
if self.env.TASK == Task.STABILIZATION:
self.mode = "stabilization"
self.x_goal = self.env.X_GOAL
elif self.env.TASK == Task.TRAJ_TRACKING:
self.mode = "tracking"
self.traj = self.env.X_GOAL.T
self.traj_step = 0
# Dynamics model.
if self.gaussian_process is not None:
self.set_gp_dynamics_func()
# CasADi optimizer.
self.setup_gp_optimizer()
self.prior_ctrl.reset()
# Previously solved states & inputs, useful for warm start.
self.x_prev = None
self.u_prev = None
| [
"torch.diag_embed",
"torch.Tensor",
"torch.zeros",
"torch.diagonal",
"torch.from_numpy"
] | 1.10.2 | thaipduong/safe-control-gym | 69f8f627d232d50813a7fff6113dd6d5caccf930 |
1.4 | """ Pooling-based Vision Transformer (PiT) in PyTorch
A PyTorch implement of Pooling-based Vision Transformers as described in
'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302
This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below.
Modifications for timm by / Copyright 2020 Ross Wightman
"""
# PiT
# Copyright 2021-present NAVER Corp.
# Apache License v2.0
import math
import re
from functools import partial
from typing import Tuple
import torch
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import trunc_normal_, to_2tuple
from .registry import register_model
from .vision_transformer import Block
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# deit models (FB weights)
'pit_ti_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'),
'pit_xs_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'),
'pit_s_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'),
'pit_b_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'),
'pit_ti_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth',
classifier=('head', 'head_dist')),
'pit_xs_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth',
classifier=('head', 'head_dist')),
'pit_s_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth',
classifier=('head', 'head_dist')),
'pit_b_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth',
classifier=('head', 'head_dist')),
}
class SequentialTuple(nn.Sequential):
""" This module exists to work around torchscript typing issues list -> list"""
def __init__(self, *args):
super(SequentialTuple, self).__init__(*args)
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
for module in self:
x = module(x)
return x
class Transformer(nn.Module):
def __init__(
self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None):
super(Transformer, self).__init__()
self.layers = nn.ModuleList([])
embed_dim = base_dim * heads
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim,
num_heads=heads,
mlp_ratio=mlp_ratio,
qkv_bias=True,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=drop_path_prob[i],
norm_layer=partial(nn.LayerNorm, eps=1e-6)
)
for i in range(depth)])
self.pool = pool
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
x, cls_tokens = x
B, C, H, W = x.shape
token_length = cls_tokens.shape[1]
x = x.flatten(2).transpose(1, 2)
x = torch.cat((cls_tokens, x), dim=1)
x = self.blocks(x)
cls_tokens = x[:, :token_length]
x = x[:, token_length:]
x = x.transpose(1, 2).reshape(B, C, H, W)
if self.pool is not None:
x, cls_tokens = self.pool(x, cls_tokens)
return x, cls_tokens
class ConvHeadPooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
super(ConvHeadPooling, self).__init__()
self.conv = nn.Conv2d(
in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride,
padding_mode=padding_mode, groups=in_feature)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
class ConvEmbedding(nn.Module):
def __init__(self, in_channels, out_channels, patch_size, stride, padding):
super(ConvEmbedding, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True)
def forward(self, x):
x = self.conv(x)
return x
class PoolingVisionTransformer(nn.Module):
""" Pooling-based Vision Transformer
A PyTorch implement of 'Rethinking Spatial Dimensions of Vision Transformers'
- https://arxiv.org/abs/2103.16302
"""
def __init__(self, img_size, patch_size, stride, base_dims, depth, heads,
mlp_ratio, num_classes=1000, in_chans=3, distilled=False,
attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0):
super(PoolingVisionTransformer, self).__init__()
padding = 0
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1)
width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1)
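# Spatial size of the patch-embedding output: floor((img + 2 * padding - patch) / stride + 1) per dimension.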
self.base_dims = base_dims
self.heads = heads
self.num_classes = num_classes
self.num_tokens = 2 if distilled else 1
self.patch_size = patch_size
self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width))
self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding)
self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0]))
self.pos_drop = nn.Dropout(p=drop_rate)
transformers = []
# stochastic depth decay rule
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]
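# Per-block drop-path rates increase linearly from 0 to drop_path_rate over all blocks,
# then are split into per-stage lists matching `depth`.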
for stage in range(len(depth)):
pool = None
if stage < len(heads) - 1:
pool = ConvHeadPooling(
base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2)
transformers += [Transformer(
base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool,
drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage])
]
self.transformers = SequentialTuple(*transformers)
self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)
self.num_features = self.embed_dim = base_dims[-1] * heads[-1]
# Classifier head
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
if self.head_dist is not None:
return self.head, self.head_dist
else:
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.head_dist is not None:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
x = self.pos_drop(x + self.pos_embed)
cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
x, cls_tokens = self.transformers((x, cls_tokens))
cls_tokens = self.norm(cls_tokens)
if self.head_dist is not None:
return cls_tokens[:, 0], cls_tokens[:, 1]
else:
return cls_tokens[:, 0]
def forward(self, x):
x = self.forward_features(x)
if self.head_dist is not None:
x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple
if self.training and not torch.jit.is_scripting():
return x, x_dist
else:
return (x + x_dist) / 2
else:
return self.head(x)
def checkpoint_filter_fn(state_dict, model):
""" preprocess checkpoints """
out_dict = {}
p_blocks = re.compile(r'pools\.(\d)\.')
for k, v in state_dict.items():
# FIXME need to update resize for PiT impl
# if k == 'pos_embed' and v.shape != model.pos_embed.shape:
# # To resize pos embedding when using model at different size from pretrained weights
# v = resize_pos_embed(v, model.pos_embed)
k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1))}.pool.', k)
out_dict[k] = v
return out_dict
def _create_pit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
PoolingVisionTransformer, variant, pretrained,
default_cfg=default_cfgs[variant],
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
@register_model
def pit_b_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=14,
stride=7,
base_dims=[64, 64, 64],
depth=[3, 6, 4],
heads=[4, 8, 16],
mlp_ratio=4,
**kwargs
)
return _create_pit('pit_b_224', pretrained, **model_kwargs)
@register_model
def pit_s_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[3, 6, 12],
mlp_ratio=4,
**kwargs
)
return _create_pit('pit_s_224', pretrained, **model_kwargs)
@register_model
def pit_xs_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
**kwargs
)
return _create_pit('pit_xs_224', pretrained, **model_kwargs)
@register_model
def pit_ti_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[32, 32, 32],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
**kwargs
)
return _create_pit('pit_ti_224', pretrained, **model_kwargs)
@register_model
def pit_b_distilled_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=14,
stride=7,
base_dims=[64, 64, 64],
depth=[3, 6, 4],
heads=[4, 8, 16],
mlp_ratio=4,
distilled=True,
**kwargs
)
return _create_pit('pit_b_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_s_distilled_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[3, 6, 12],
mlp_ratio=4,
distilled=True,
**kwargs
)
return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_xs_distilled_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
distilled=True,
**kwargs
)
return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_ti_distilled_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[32, 32, 32],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
distilled=True,
**kwargs
)
return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.Identity",
"torch.nn.ModuleList",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.jit.is_scripting",
"torch.randn"
] | 1.4.0 | Animatory/pytorch-image-models | 3ace100fcfdab3619dc71307613c42e53fb70221 |
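A minimal usage sketch for the PoolingVisionTransformer defined above, using the pit_ti_224 hyper-parameters from this row; it is not part of the original file and assumes the rest of the module (Transformer, SequentialTuple and the timm helpers) is importable.
import torch

model = PoolingVisionTransformer(
    img_size=224, patch_size=16, stride=8,
    base_dims=[32, 32, 32], depth=[2, 6, 4], heads=[2, 4, 8],
    mlp_ratio=4, num_classes=1000)
x = torch.randn(2, 3, 224, 224)   # a batch of two RGB images
logits = model(x)                 # shape [2, 1000]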
1.1 | '''
This code is based on pytorch_ssd and RFBNet.
Details about the modules:
TUM - Thinned U-shaped Module
MLFPN - Multi-Level Feature Pyramid Network
M2Det - Multi-level Multi-scale single-shot object Detector
Author: Qijie Zhao ([email protected])
Finished Date: 01/17/2019
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
import warnings
warnings.filterwarnings('ignore')
from ..registry import NECKS
from ..utils import ConvModule
class TUM(nn.Module):
def __init__(self, first_level=True, input_planes=128, is_smooth=True, side_channel=512, scales=6,
conv_cfg=None,
norm_cfg=None
):
super(TUM, self).__init__()
self.is_smooth = is_smooth
self.side_channel = side_channel
self.input_planes = input_planes
self.planes = 2 * self.input_planes
self.first_level = first_level
self.scales = scales
self.in1 = input_planes + side_channel if not first_level else input_planes
self.layers = nn.Sequential()
self.layers.add_module('{}'.format(len(self.layers)), ConvModule(self.in1, self.planes, 3, 2, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg))
for i in range(self.scales - 2):
if not i == self.scales - 3:
self.layers.add_module(
'{}'.format(len(self.layers)),
ConvModule(self.planes, self.planes, 3, 2, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
else:
self.layers.add_module(
'{}'.format(len(self.layers)),
ConvModule(self.planes, self.planes, 3, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
self.toplayer = nn.Sequential(ConvModule(self.planes, self.planes, 1, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg))
self.latlayer = nn.Sequential()
for i in range(self.scales - 2):
self.latlayer.add_module(
'{}'.format(len(self.latlayer)),
ConvModule(self.planes, self.planes, 3, 1, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
self.latlayer.add_module('{}'.format(len(self.latlayer)), ConvModule(self.in1, self.planes, 3, 1, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg))
if self.is_smooth:
smooth = list()
for i in range(self.scales - 1):
smooth.append(
ConvModule(self.planes, self.planes, 1, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
self.smooth = nn.Sequential(*smooth)
def _upsample_add(self, x, y, fuse_type='interp'):
_, _, H, W = y.size()
if fuse_type == 'interp':
return F.interpolate(x, size=(H, W), mode='nearest') + y
else:
raise NotImplementedError
# return nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
def forward(self, x, y):
if not self.first_level:
x = torch.cat([x, y], 1)
conved_feat = [x]
for i in range(len(self.layers)):
x = self.layers[i](x)
conved_feat.append(x)
deconved_feat = [self.toplayer[0](conved_feat[-1])]
for i in range(len(self.latlayer)):
deconved_feat.append(
self._upsample_add(
deconved_feat[i], self.latlayer[i](conved_feat[len(self.layers) - 1 - i])
)
)
if self.is_smooth:
smoothed_feat = [deconved_feat[0]]
for i in range(len(self.smooth)):
smoothed_feat.append(
self.smooth[i](deconved_feat[i + 1])
)
return smoothed_feat
return deconved_feat
class SFAM(nn.Module):
def __init__(self, planes, num_levels, num_scales, compress_ratio=16):
super(SFAM, self).__init__()
self.planes = planes
self.num_levels = num_levels
self.num_scales = num_scales
self.compress_ratio = compress_ratio
self.fc1 = nn.ModuleList([nn.Conv2d(self.planes * self.num_levels,
self.planes * self.num_levels // 16,
1, 1, 0)] * self.num_scales)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.ModuleList([nn.Conv2d(self.planes * self.num_levels // 16,
self.planes * self.num_levels,
1, 1, 0)] * self.num_scales)
self.sigmoid = nn.Sigmoid()
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
attention_feat = []
for i, _mf in enumerate(x):
_tmp_f = self.avgpool(_mf)
_tmp_f = self.fc1[i](_tmp_f)
_tmp_f = self.relu(_tmp_f)
_tmp_f = self.fc2[i](_tmp_f)
_tmp_f = self.sigmoid(_tmp_f)
attention_feat.append(_mf * _tmp_f)
return attention_feat
@NECKS.register_module
class M2FPN(nn.Module):
def __init__(self,
num_levels = 8,
num_scales = 5,
sfam=False,
smooth=True,
in_channels = [512,2048],
out_channels=256, conv_cfg=None,
norm_cfg=None):
'''
M2Det: Multi-level Multi-scale single-shot object Detector
'''
super(M2FPN,self).__init__()
self.planes = out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_levels = num_levels
self.num_scales = num_scales
self.sfam = sfam
self.smooth = smooth
self.in_channels = in_channels
self.shallow_out =256
self.deep_out =512
self.construct_modules()
def construct_modules(self,):
# construct tums
for i in range(self.num_levels):
if i == 0:
setattr(self,
'unet{}'.format(i+1),
TUM(first_level=True,
input_planes=self.planes//2,
is_smooth=self.smooth,
scales=self.num_scales,
side_channel=512)) #side channel isn't fixed.
else:
setattr(self,
'unet{}'.format(i+1),
TUM(first_level=False,
input_planes=self.planes//2,
is_smooth=self.smooth,
scales=self.num_scales,
side_channel=self.planes))
self.reduce= ConvModule(self.in_channels[0], self.shallow_out, kernel_size=3, stride=1, padding=1)
self.up_reduce_1= ConvModule(self.in_channels[2], self.in_channels[1], kernel_size=1, stride=1)
self.up_reduce_2= ConvModule(self.in_channels[1], self.deep_out, kernel_size=1, stride=1)
self.Norm = nn.BatchNorm2d(256*8)
self.leach = nn.ModuleList([ConvModule(
self.deep_out+self.shallow_out,
self.planes//2,
kernel_size=(1,1),stride=(1,1))]*self.num_levels)
# construct localization and recognition layers
conv_out = nn.ModuleList()
for i in range(self.num_scales):
conv_out.append(nn.Conv2d(self.planes*self.num_levels,
self.planes,
3, 1, 1))
self.conv_out = nn.ModuleList(conv_out)
# construct SFAM module
if self.sfam:
self.sfam_module = SFAM(self.planes, self.num_levels, self.num_scales, compress_ratio=16)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self,x):
assert len(x)==len(self.in_channels)
# loc,conf = list(),list()
# base_feats = list()
# if 'vgg' in self.net_family:
# for k in range(len(self.base)):
# x = self.base[k](x)
# if k in self.base_out:
# base_feats.append(x)
# elif 'res' in self.net_family:
# base_feats = self.base(x, self.base_out)
up_feats = x[1] + F.interpolate(self.up_reduce_1(x[2]),scale_factor=2,mode='nearest')
base_feature = torch.cat(
(self.reduce(x[0]), F.interpolate(self.up_reduce_2(up_feats),scale_factor=2,mode='nearest')),1
)
# tum_outs is the multi-level multi-scale feature
tum_outs = [getattr(self, 'unet{}'.format(1))(self.leach[0](base_feature), 'none')]
for i in range(1,self.num_levels,1):
tum_outs.append(
getattr(self, 'unet{}'.format(i+1))(
self.leach[i](base_feature), tum_outs[i-1][-1]
)
)
# concat with same scales
sources = [torch.cat([_fx[i-1] for _fx in tum_outs],1) for i in range(self.num_scales, 0, -1)]
# forward_sfam
if self.sfam:
sources = self.sfam_module(sources)
sources[0] = self.Norm(sources[0])
output = []
for (x,cout) in zip(sources, self.conv_out):
output.append(cout(x))
return tuple(output)
| [
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
] | 1.1 | ningdez/Tianchi_Cancer_303 | 59e9b6f906e48e7508f455ce29b97d430791fcf5 |
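A shape sketch for the M2FPN neck above, not taken from the repository; it assumes the mmdetection-style ConvModule and NECKS registry imports resolve. Note that the default in_channels=[512, 2048] looks inconsistent with the use of self.in_channels[2] in construct_modules, so a three-level backbone output is assumed here.
import torch

neck = M2FPN(num_levels=8, num_scales=5, in_channels=[512, 1024, 2048], out_channels=256)
neck.init_weights()
feats = (torch.randn(1, 512, 64, 64),    # stride-8 feature map
         torch.randn(1, 1024, 32, 32),   # stride-16
         torch.randn(1, 2048, 16, 16))   # stride-32
outs = neck(feats)                        # tuple of num_scales maps, 256 channels each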
1.4 | import torch
import numpy as np
import argparse
import pandas as pd
import sys
import os
from torch import nn
from torch.nn import functional as F
import tqdm
import pprint
from src import utils as ut
import torchvision
from haven import haven_utils as hu
from haven import haven_chk as hc
from src import datasets, models
from torch.utils.data import DataLoader
import exp_configs
from torch.utils.data.sampler import RandomSampler
from src import wrappers
def trainval(exp_dict, savedir_base, reset, metrics_flag=True, datadir=None, cuda=False):
# bookkeeping
# ---------------
# get experiment directory
exp_id = hu.hash_dict(exp_dict)
savedir = os.path.join(savedir_base, exp_id)
if reset:
# delete and backup experiment
hc.delete_experiment(savedir, backup_flag=True)
# create folder and save the experiment dictionary
os.makedirs(savedir, exist_ok=True)
hu.save_json(os.path.join(savedir, 'exp_dict.json'), exp_dict)
pprint.pprint(exp_dict)
print('Experiment saved in %s' % savedir)
# set seed
# ==================
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
device = 'cuda'
torch.cuda.manual_seed_all(seed)
assert torch.cuda.is_available(), 'cuda is not available, please run with "-c 0"'
else:
device = 'cpu'
print('Running on device: %s' % device)
# Dataset
# Load val set and train set
val_set = datasets.get_dataset(dataset_name=exp_dict["dataset"], split="val",
transform=exp_dict.get("transform"),
datadir=datadir)
train_set = datasets.get_dataset(dataset_name=exp_dict["dataset"],
split="train",
transform=exp_dict.get("transform"),
datadir=datadir)
# Load train loader, val loader, and vis loader
train_loader = DataLoader(train_set,
sampler=RandomSampler(train_set,
replacement=True, num_samples=max(min(500,
len(train_set)),
len(val_set))),
batch_size=exp_dict["batch_size"])
val_loader = DataLoader(val_set, shuffle=False, batch_size=exp_dict["batch_size"])
vis_loader = DataLoader(val_set, sampler=ut.SubsetSampler(train_set,
indices=[0, 1, 2]),
batch_size=1)
# Create model, opt, wrapper
model_original = models.get_model(exp_dict["model"], exp_dict=exp_dict).cuda()
opt = torch.optim.Adam(model_original.parameters(),
lr=1e-5, weight_decay=0.0005)
model = wrappers.get_wrapper(exp_dict["wrapper"], model=model_original, opt=opt).cuda()
score_list = []
# Checkpointing
# =============
score_list_path = os.path.join(savedir, "score_list.pkl")
model_path = os.path.join(savedir, "model_state_dict.pth")
opt_path = os.path.join(savedir, "opt_state_dict.pth")
if os.path.exists(score_list_path):
# resume experiment
score_list = ut.load_pkl(score_list_path)
model.load_state_dict(torch.load(model_path))
opt.load_state_dict(torch.load(opt_path))
s_epoch = score_list[-1]["epoch"] + 1
else:
# restart experiment
score_list = []
s_epoch = 0
# Run training and validation
for epoch in range(s_epoch, exp_dict["max_epoch"]):
score_dict = {"epoch": epoch}
# visualize
# model.vis_on_loader(vis_loader, savedir=os.path.join(savedir, "images"))
# validate
score_dict.update(model.val_on_loader(val_loader))
# train
score_dict.update(model.train_on_loader(train_loader))
# Add score_dict to score_list
score_list += [score_dict]
# Report and save
print(pd.DataFrame(score_list).tail())
hu.save_pkl(score_list_path, score_list)
hu.torch_save(model_path, model.state_dict())
hu.torch_save(opt_path, opt.state_dict())
print("Saved in %s" % savedir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--exp_group_list', nargs='+')
parser.add_argument('-sb', '--savedir_base', required=True)
parser.add_argument('-d', '--datadir', required=True)
parser.add_argument('-r', '--reset', default=0, type=int)
parser.add_argument('-ei', '--exp_id', default=None)
parser.add_argument('-c', '--cuda', type=int, default=1)
args = parser.parse_args()
# Collect experiments
# -------------------
if args.exp_id is not None:
# select one experiment
savedir = os.path.join(args.savedir_base, args.exp_id)
exp_dict = hu.load_json(os.path.join(savedir, 'exp_dict.json'))
exp_list = [exp_dict]
else:
# select exp group
exp_list = []
for exp_group_name in args.exp_group_list:
exp_list += exp_configs.EXP_GROUPS[exp_group_name]
####
# Run experiments or View them
# ----------------------------
# run experiments
for exp_dict in exp_list:
# do trainval
trainval(exp_dict=exp_dict,
savedir_base=args.savedir_base,
reset=args.reset,
datadir=args.datadir,
cuda=args.cuda)
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.4.0 | DoranLyong/DeepFish | 3ea3e13653f708d4a8dcb54b990dcc2997edf4e9 |
1.8 | from torch import nn
import torch
from ..base import LinkPredictionBase
from .ConcatFeedForwardNNLayer import ConcatFeedForwardNNLayer
class ConcatFeedForwardNN(LinkPredictionBase):
r"""Specific class for link prediction task.
Parameters
----------
input_size : int
The length of input node embeddings
num_class : int
The number of node categories for classification
hidden_size : list of int type values
Example for two layers's FeedforwardNN: [50, 20]
activation: the activation function class for each fully connected layer
Default: nn.ReLU()
Example: nn.ReLU(),nn.Sigmoid().
"""
def __init__(self, input_size, hidden_size,num_class,activation=nn.ReLU()):
super(ConcatFeedForwardNN, self).__init__()
self.classifier=ConcatFeedForwardNNLayer(input_size, num_class, hidden_size,activation)
def forward(self, input_graph):
r"""
Forward functions to compute the logits tensor for link prediction.
Parameters
----------
input graph : GraphData
The tensors stored in the node feature field named "node_emb" in the
input_graph are used for link prediction.
Returns
---------
output_graph : GraphData
The computed logit tensor for each pair of nodes in the graph are stored
in the node feature field named "edge_logits".
logit tensor shape is: [num_class]
"""
# get the node embedding from the graph
node_emb=input_graph.node_features['node_emb']
#add the edges and edge prediction logits into the graph
num_node=node_emb.shape[0]
node_idx_list=[idx for idx in range(num_node)]
src_idx=torch.tensor(node_idx_list).view(-1,1).repeat(1,num_node).view(-1)
dst_idx=torch.tensor(node_idx_list).view(1,-1).repeat(num_node,1).view(-1)
input_graph.add_edges(src_idx,dst_idx)
input_graph.edge_features['logits']=self.classifier(node_emb)
return input_graph
| [
"torch.nn.ReLU",
"torch.tensor"
] | 1.8.0 | stjordanis/graph4nlp | c6ebde32bc77d3a7b78f86a93f19b1c057963ffa |
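A standalone sketch (toy size, not from the repository) of the all-pairs edge construction used in forward() above: src_idx and dst_idx together enumerate every ordered node pair.
import torch

num_node = 3
idx = torch.arange(num_node)
src_idx = idx.view(-1, 1).repeat(1, num_node).view(-1)  # tensor([0, 0, 0, 1, 1, 1, 2, 2, 2])
dst_idx = idx.view(1, -1).repeat(num_node, 1).view(-1)  # tensor([0, 1, 2, 0, 1, 2, 0, 1, 2])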
1.10 | import torch
from node2vec import Node2Vec as Node2Vec_
from .brain_data import BrainData
from torch_geometric.data import Data
from networkx.convert_matrix import from_numpy_matrix
from .utils import binning, LDP
import networkx as nx
from .base_transform import BaseTransform
from numpy import linalg as LA
import numpy as np
class FromSVTransform(BaseTransform):
def __init__(self, sv_transform):
super(FromSVTransform, self).__init__()
self.sv_transform = sv_transform
def __call__(self, data):
keys = list(filter(lambda x: x.startswith('edge_index'), data.keys))
for key in keys:
if key.startswith('edge_index'):
postfix = key[10:]
edge_index = data[f'edge_index{postfix}']
edge_attr = data[f'edge_attr{postfix}']
svdata = Data(edge_index=edge_index, edge_attr=edge_attr, num_nodes=data.num_nodes)
svdata_transformed = self.sv_transform(svdata)
data[f'x{postfix}'] = svdata_transformed.x
data[f'edge_index{postfix}'] = svdata_transformed.edge_index
data[f'edge_attr{postfix}'] = svdata_transformed.edge_attr
return data
def __str__(self):
return self.sv_transform.__class__.__name__
class Identity(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns a diagonal matrix with ones on the diagonal.
:param data: BrainData
:return: torch.Tensor
"""
data.x = torch.diag(torch.ones(data.num_nodes))
return data
class Degree(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns data with the degree of each node (the row sums of the dense adjacency matrix) stored as its node feature.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
data.x = torch.Tensor(adj.sum(dim=1, keepdim=True)).float()
return data
def __str__(self):
return 'Degree'
class LDPTransform(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns node feature with LDP transform.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
data.x = torch.Tensor(
LDP(nx.from_numpy_array(adj.numpy()))
).float()
return data
def __str__(self):
return 'LDP'
class DegreeBin(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns node feature with degree bin transform.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
return torch.Tensor(binning(adj.sum(dim=1))).float()
def __str__(self):
return 'Degree_Bin'
class Adj(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns adjacency matrix.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
data.x = adj
return data
def __str__(self):
return 'Adj'
class Eigenvector(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns node feature with eigenvector.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
w, v = LA.eig(adj.numpy())
# indices = np.argsort(w)[::-1]
v = v.transpose()
data.x = torch.Tensor(v).float()
return data
class EigenNorm(BaseTransform):
def __call__(self, data: BrainData):
"""
Returns node feature with eigen norm.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
sum_of_rows = adj.sum(dim=1)
adj /= sum_of_rows
adj = torch.nan_to_num(adj)
w, v = LA.eig(adj.numpy())
# indices = np.argsort(w)[::-1]
v = v.transpose()
data.x = torch.Tensor(v).float()
return data
class Node2Vec(BaseTransform):
def __init__(self, feature_dim=32, walk_length=5, num_walks=200, num_workers=4,
window=10, min_count=1, batch_words=4):
super(Node2Vec, self).__init__()
self.feature_dim = feature_dim
self.walk_length = walk_length
self.num_walks = num_walks
self.num_workers = num_workers
self.window = window
self.min_count = min_count
self.batch_words = batch_words
def __call__(self, data):
"""
Returns node feature with node2vec transform.
:param data: BrainData
:return: torch.Tensor
"""
adj = torch.sparse_coo_tensor(data.edge_index, data.edge_attr, [data.num_nodes, data.num_nodes])
adj = adj.to_dense()
if (adj < 0).int().sum() > 0:
# split the adjacency matrix into two (negative and positive) parts
pos_adj = adj.clone()
pos_adj[adj < 0] = 0
neg_adj = adj.clone()
neg_adj[adj > 0] = 0
neg_adj = -neg_adj
adjs = [pos_adj, neg_adj]
else:
adjs = [adj]
xs = []
for adj in adjs:
x = torch.zeros((data.num_nodes, self.feature_dim))
graph = from_numpy_matrix(adj.numpy())
node2vec = Node2Vec_(graph, dimensions=self.feature_dim, walk_length=self.walk_length,
num_walks=self.num_walks, workers=self.num_workers)
model = node2vec.fit(window=self.window, min_count=self.min_count,
batch_words=self.batch_words)
for i in range(data.num_nodes):
x[i] = torch.Tensor(model.wv[f'{i}'].copy())
xs.append(x)
data.x = torch.cat(xs, dim=-1)
return data
def __str__(self):
return 'Node2Vec'
| [
"torch.zeros",
"torch.cat",
"torch.nan_to_num",
"torch.ones",
"torch.sparse_coo_tensor",
"torch.Tensor"
] | 1.10.2 | HennyJie/BrainGB | 96cf6711e2f2e6fa48b699ce3c0d6e318955c4de |
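A minimal sketch of the dense-adjacency pattern shared by the transforms above, on a hypothetical 3-node graph (not a BrainGB sample); Degree would store the resulting column of row sums in data.x.
import torch

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
edge_attr = torch.tensor([0.5, 0.5, 2.0, 2.0])
num_nodes = 3
adj = torch.sparse_coo_tensor(edge_index, edge_attr, [num_nodes, num_nodes]).to_dense()
degree_feature = adj.sum(dim=1, keepdim=True)  # shape [3, 1]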
1.7 | """
---
title: CIFAR10 Experiment to try Group Normalization
summary: >
This trains a simple convolutional neural network that uses group normalization
to classify CIFAR10 images.
---
# CIFAR10 Experiment for Group Normalization
"""
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_helpers.module import Module
from labml_nn.experiments.cifar10 import CIFAR10Configs
from labml_nn.normalization.group_norm import GroupNorm
class Model(Module):
"""
### VGG model for CIFAR-10 classification
"""
def __init__(self, groups: int = 32):
super().__init__()
layers = []
# RGB channels
in_channels = 3
# Number of channels in each layer in each block
for block in [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]]:
# Convolution, Normalization and Activation layers
for channels in block:
layers += [nn.Conv2d(in_channels, channels, kernel_size=3, padding=1),
GroupNorm(groups, channels),
nn.ReLU(inplace=True)]
in_channels = channels
# Max pooling at end of each block
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
# Create a sequential model with the layers
self.layers = nn.Sequential(*layers)
# Final logits layer
self.fc = nn.Linear(512, 10)
def forward(self, x):
# The VGG layers
x = self.layers(x)
# Reshape for classification layer
x = x.view(x.shape[0], -1)
# Final linear layer
return self.fc(x)
class Configs(CIFAR10Configs):
# Number of groups
groups: int = 16
@option(Configs.model)
def model(c: Configs):
"""
### Create model
"""
return Model(c.groups).to(c.device)
def main():
# Create experiment
experiment.create(name='cifar10', comment='group norm')
# Create configurations
conf = Configs()
# Load configurations
experiment.configs(conf, {
'optimizer.optimizer': 'Adam',
'optimizer.learning_rate': 2.5e-4,
})
# Start the experiment and run the training loop
with experiment.start():
conf.run()
#
if __name__ == '__main__':
main()
| [
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.7 | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e |
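A small sketch, not part of the labml code, illustrating what the GroupNorm layers in the model above compute; PyTorch's built-in nn.GroupNorm is used here as a stand-in, assuming it matches the labml implementation's behaviour.
import torch
import torch.nn as nn

x = torch.randn(4, 64, 32, 32)                      # [batch, channels, H, W]
gn = nn.GroupNorm(num_groups=16, num_channels=64)   # 16 groups of 4 channels
y = gn(x)   # each sample is normalized per group, independent of batch size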
1.7 | """
---
title: Compressive Transformer Experiment
summary: This experiment trains a compressive transformer model on tiny Shakespeare dataset.
---
# Compressive Transformer Experiment
This is an annotated PyTorch experiment to train a compressive transformer model.
"""
from typing import List, Tuple, NamedTuple
import torch
import torch.nn as nn
from labml import experiment, tracker, monit, logger
from labml.configs import option
from labml.logger import Text
from labml_helpers.metrics.simple_state import SimpleStateModule
from labml_helpers.module import Module
from labml_helpers.train_valid import BatchIndex, hook_model_outputs
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers.compressive import CompressiveTransformer, AttentionReconstructionLoss, \
CompressiveTransformerLayer, Conv1dCompression
class CompressedMemory(NamedTuple):
mem: List[torch.Tensor]
c_mem: List[torch.Tensor]
class AutoregressiveModel(Module):
"""
## Auto regressive model
"""
def __init__(self, n_vocab: int, d_model: int, transformer: CompressiveTransformer):
super().__init__()
# Token embedding module
self.src_embed = nn.Embedding(n_vocab, d_model)
# Transformer
self.transformer = transformer
# Final layer
self.generator = nn.Linear(d_model, n_vocab)
# Masks
self.mask_x = None
self.mask_mem = None
def forward(self, x: torch.Tensor, mem: CompressedMemory):
# Get memory and compressed memory
if mem is not None:
mem, c_mem = mem.mem, mem.c_mem
else:
mem = []
c_mem = []
# Total length of the memory and compressed memory (for masks)
m_len = len(mem[0]) if mem else 0
if c_mem:
m_len += len(c_mem[0])
# Create a subsequent mask for tokens
if self.mask_x is None or self.mask_x.shape[0] < len(x):
from labml_nn.transformers.utils import subsequent_mask
self.mask_x = subsequent_mask(len(x)).to(x.device)
# Create an all ones (full visibility) mask for memory
if self.mask_mem is None or self.mask_mem.shape[1] < m_len or self.mask_mem.shape[0] < len(x):
self.mask_mem = self.mask_x.new_ones(len(x), m_len, 1)
# Concatenate the masks if there is memory
if m_len:
mask = torch.cat((self.mask_mem[:len(x), :m_len], self.mask_x[:len(x), :len(x)]), dim=1)
# Use only the subsequent mask otherwise
else:
mask = self.mask_x[:len(x), :len(x)]
# Token embeddings
x = self.src_embed(x)
# Run it through the transformer
res, mem = self.transformer(x, mem, c_mem, mask)
# Generate logits of the next token
res = self.generator(res)
#
return res, mem
class Configs(NLPAutoRegressionConfigs):
"""
## Configurations
The default configurations can and will be overridden when we start the experiment.
"""
model: AutoregressiveModel
# Token embedding size
d_model: int = 128
# Number of attention heads
heads: int = 4
# Dropout probability
dropout: float = 0.0
# Number of features in FFN hidden layer
d_ff: int = 256
# Number of transformer layers
n_layers: int = 6
# Number of memories to keep
mem_len: int = 8
# State module to maintain memories when switching between training and validation
memory = SimpleStateModule()
# Attention Reconstruction Loss
attention_reconstruction_loss: AttentionReconstructionLoss
# Compression rate
compression_rate: int = 4
# Compressed memory length
c_mem_len: int = 128
def init(self):
# Set tracker configurations
tracker.set_scalar("accuracy.*", True)
tracker.set_scalar("loss.*", True)
# Do not print the attention reconstruction loss in the terminal
tracker.set_scalar("ar_loss.*", False)
# Add a hook to log module outputs
hook_model_outputs(self.mode, self.model, 'model')
# This will keep the accuracy metric stats and memories separate for training and validation.
self.state_modules = [self.accuracy, self.memory]
@torch.no_grad()
def merge_compress_memory(self, mem: CompressedMemory, new_mem: List[torch.Tensor]) \
-> Tuple[CompressedMemory, List[torch.Tensor]]:
"""
Concatenate new memories and compress the oldest memories.
"""
# If the configurations specify not to use memory
if self.mem_len == 0 and self.c_mem_len == 0:
return CompressedMemory([], []), []
# Get memory and compressed memory
if mem is not None:
mem, c_mem = mem.mem, mem.c_mem
else:
mem, c_mem = [], []
# Concatenate new memories with old memory
if mem:
mem = [torch.cat((m, x), dim=0) for m, x in zip(mem, new_mem)]
else:
mem = new_mem
# Compress the oldest memories if there are more memories than `mem_len`
if len(mem[0]) > self.mem_len:
# Calculate the number of compressed memories to make $n_{cm} = \bigg\lceil\frac{n'_m - N_m}{c}\bigg\rceil$,
# where $n'_m$ is the number of memories we have
# and $N_m$ is the maximum number of memories we maintain (`mem_len`).
n_c_mem = (len(mem[0]) - self.mem_len + self.compression_rate - 1) // self.compression_rate
# Number of memories to compress $c n_{cm}$
n_old = n_c_mem * self.compression_rate
# A list to keep memories that need to be compressed for each layer.
mem_to_compress = []
# A list to keep the memories that do not get compressed for each layer.
uncompressed_mem = []
# Iterate through memories of each layer.
for m in mem:
# Split the memories at $c n_{cm}$
cm, m = torch.split(m, [n_old, len(m) - n_old])
# Collect memories to compress
mem_to_compress.append(cm)
# Collect remaining memories
uncompressed_mem.append(m)
# Update the memories
mem = uncompressed_mem
# Compress the memories
new_c_mem = []
for i, layer in enumerate(self.model.transformer.layers):
new_c_mem.append(layer.compress(mem_to_compress[i]))
# Concatenate newly compressed memories with old compressed memories
if c_mem:
c_mem = [torch.cat((m, nm), dim=0) for m, nm in zip(c_mem, new_c_mem)]
# If there are no old compressed memories
else:
c_mem = new_c_mem
# Truncate old memories
if len(c_mem[0]) > self.c_mem_len:
c_mem = [m[-self.c_mem_len:] for m in c_mem]
# No memories are compressed if the number of memories is less than `mem_len`
else:
mem_to_compress = []
# Return memories and the memories that were compressed.
# Memories that were compressed are needed for the reconstruction loss computation.
return CompressedMemory(mem, c_mem), mem_to_compress
def step(self, batch: any, batch_idx: BatchIndex):
"""
### Training/validation step
"""
# Move data to the device
data, target = batch[0].to(self.device), batch[1].to(self.device)
# Update global step (number of tokens processed) when in training mode
if self.mode.is_train:
tracker.add_global_step(data.shape[0] * data.shape[1])
# Whether to capture model outputs
with self.mode.update(is_log_activations=batch_idx.is_last):
# Get memories
mem = self.memory.get()
# Run the model
output, new_mem = self.model(data, mem)
# Merge and compress memory
mem, mem_to_compress = self.merge_compress_memory(mem, new_mem)
# Update memories
self.memory.set(mem)
# Calculate and log cross entropy loss
loss = self.loss_func(output, target)
tracker.add("loss.", loss)
# Calculate attention reconstruction loss if memories were compressed in this step
if mem_to_compress:
# Get attention reconstruction loss
ar_loss = self.attention_reconstruction_loss(new_mem, mem_to_compress)
# Track attention reconstruction loss
tracker.add("ar_loss.", ar_loss)
# Add attention reconstruction loss to loss
loss = loss + ar_loss
# Calculate and log accuracy
self.accuracy(output, target)
self.accuracy.track()
# Train the model
if self.mode.is_train:
# Calculate gradients
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
# Take optimizer step
self.optimizer.step()
# Log the model parameters and gradients on last batch of every epoch
if batch_idx.is_last:
tracker.add('model', self.model)
# Clear the gradients
self.optimizer.zero_grad()
# Save the tracked metrics
tracker.save()
def sample(self):
"""
### Sampling function to generate samples periodically while training
"""
# Starting prompt
prompt = self.prompt
# Collect output for printing
log = [(prompt, Text.subtle)]
# memory
mem = CompressedMemory([], [])
# Sample 25 tokens
for i in monit.iterate('Sample', 25):
# Tokenize the prompt
data = self.text.text_to_i(prompt).unsqueeze(-1)
# Move to device
data = data.to(self.device)
# Get the model output
output, new_mem = self.model(data, mem)
# Get the model prediction (greedy)
output = output.argmax(dim=-1).squeeze(1)
# Add the prediction to prompt
prompt += self.prompt_separator + self.text.itos[output[-1]]
# Only feed the last character to model in next iteration, rest will go in as memories
prompt = prompt[-1:]
# Add the prediction for logging
log += [(self.prompt_separator + self.text.itos[output[-1]], Text.value)]
# Update and compress memory
mem, _ = self.merge_compress_memory(mem, new_mem)
# Print the sampled output
logger.log(log)
@option(Configs.model)
def autoregressive_model(c: Configs):
"""
### Initialize the auto-regressive model
"""
from labml_nn.transformers.xl import RelativeMultiHeadAttention
from labml_nn.transformers.feed_forward import FeedForward
m = AutoregressiveModel(c.n_tokens, c.d_model, CompressiveTransformer(
CompressiveTransformerLayer(d_model=c.d_model,
self_attn=RelativeMultiHeadAttention(c.heads, c.d_model, c.dropout),
feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout),
dropout_prob=c.dropout,
compress=Conv1dCompression(c.compression_rate, c.d_model)), c.n_layers))
return m.to(c.device)
@option(Configs.attention_reconstruction_loss)
def attention_reconstruction_loss(c: Configs):
"""
### Initialize the attention reconstruction loss
"""
return AttentionReconstructionLoss(c.model.transformer.layers)
def main():
"""
### Run the experiment
"""
# Create experiment
experiment.create(name="compressive_transformer", comment='')
# Create configs
conf = Configs()
# Load configurations
experiment.configs(conf,
# A dictionary of configurations to override
{'tokenizer': 'character',
'text': 'tiny_shakespeare',
'optimizer.learning_rate': 2.5e-4,
'optimizer.optimizer': 'AdamW',
'prompt': 'It is',
'prompt_separator': '',
'train_loader': 'sequential_train_loader',
'valid_loader': 'sequential_valid_loader',
'seq_len': 8,
'mem_len': 8,
'epochs': 128,
'batch_size': 32,
'inner_iterations': 25,
'compression_rate': 2,
})
# Set models for saving and loading
experiment.add_pytorch_models({'model': conf.model})
# Start the experiment
with experiment.start():
# `TrainValidConfigs.run`
conf.run()
#
if __name__ == '__main__':
main()
| [
"torch.nn.Linear",
"torch.no_grad",
"torch.cat",
"torch.nn.Embedding"
] | 1.7 | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e |
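A numeric sketch (hypothetical values) of the compression bookkeeping in merge_compress_memory() above, showing how many of the oldest memories get compressed.
mem_len, compression_rate = 8, 4
current_mem = 13                                                              # memories after concatenating the new ones
n_c_mem = (current_mem - mem_len + compression_rate - 1) // compression_rate  # ceil((13 - 8) / 4) = 2
n_old = n_c_mem * compression_rate                                            # the 8 oldest memories are compressed into 2
remaining = current_mem - n_old                                               # 5 memories stay uncompressed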
1.7 | """
---
title: Pay Attention to MLPs (gMLP)
summary: >
This is an annotated implementation/tutorial of Pay Attention to MLPs (gMLP) in PyTorch.
---
# Pay Attention to MLPs (gMLP)
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Pay Attention to MLPs](https://papers.labml.ai/paper/2105.08050).
This paper introduces a Multilayer Perceptron (MLP) based architecture with gating,
which they name **gMLP**. It consists of a stack of $L$ *gMLP* blocks.
Here is [the training code](experiment.html) for a gMLP model based autoregressive model.
[](https://app.labml.ai/run/01bd941ac74c11eb890c1d9196651a4a)
"""
from typing import Optional
import torch
from torch import nn
class GMLPBlock(nn.Module):
"""
## gMLP Block
Each block does the following transformations to input embeddings
$X \in \mathbb{R}^{n \times d}$ where $n$ is the sequence length
and $d$ is the dimensionality of the embeddings:
\begin{align}
Z &= \sigma(XU) \\
\tilde{Z} &= s(Z) \\
Y &= \tilde{Z}V \\
\end{align}
where $V$ and $U$ are learnable projection weights.
$s(\cdot)$ is the Spacial Gating Unit defined below.
Output dimensionality of $s(\cdot)$ will be half of $Z$.
$\sigma$ is an activation function such as
[GeLU](https://pytorch.org/docs/stable/generated/torch.nn.GELU.html).
"""
def __init__(self, d_model: int, d_ffn: int, seq_len: int):
"""
`d_model` is the dimensionality ($d$) of $X$
`d_ffn` is the dimensionality of $Z$
`seq_len` is the length of the token sequence ($n$)
"""
super().__init__()
# Normalization layer for Pre-Norm
self.norm = nn.LayerNorm([d_model])
# Activation function $\sigma$
self.activation = nn.GELU()
# Projection layer for $Z = \sigma(XU)$
self.proj1 = nn.Linear(d_model, d_ffn)
# Spacial Gating Unit $s(\cdot)$
self.sgu = SpacialGatingUnit(d_ffn, seq_len)
# Projection layer for $Y = \tilde{Z}V$
self.proj2 = nn.Linear(d_ffn // 2, d_model)
# Embedding size (required by [Encoder](../models.html#Encoder).
# We use the encoder module from transformer architecture and plug
# *gMLP* block as a replacement for the [Transformer Layer](../models.html#Encoder).
self.size = d_model
def forward(self, *, x: torch.Tensor, mask: Optional[torch.Tensor] = None):
"""
* `x` is the input embedding tensor $X$ of shape `[seq_len, batch_size, d_model]`
* `mask` is a boolean mask of shape `[seq_len, seq_len, 1]` that controls the visibility of tokens
among each other.
"""
# Keep a copy for shortcut connection
shortcut = x
# Normalize $X$
x = self.norm(x)
# Projection and activation $Z = \sigma(XU)$
z = self.activation(self.proj1(x))
# Spacial Gating Unit $\tilde{Z} = s(Z)$
z = self.sgu(z, mask)
# Final projection $Y = \tilde{Z}V$
z = self.proj2(z)
# Add the shortcut connection
return z + shortcut
class SpacialGatingUnit(nn.Module):
"""
## Spatial Gating Unit
$$s(Z) = Z_1 \odot f_{W,b}(Z_2)$$
where $f_{W,b}(Z) = W Z + b$ is a linear transformation along the sequence dimension,
and $\odot$ is element-wise multiplication.
$Z$ is split into to parts of equal size $Z_1$ and $Z_2$ along the channel dimension (embedding dimension).
"""
def __init__(self, d_z: int, seq_len: int):
"""
* `d_z` is the dimensionality of $Z$
* `seq_len` is the sequence length
"""
super().__init__()
# Normalization layer before applying $f_{W,b}(\cdot)$
self.norm = nn.LayerNorm([d_z // 2])
# Weight $W$ in $f_{W,b}(\cdot)$.
#
# The paper notes that it's important to initialize weights to small values and the bias to $1$,
# so that during the initial training $s(\cdot)$ is close to identity (apart from the split).
self.weight = nn.Parameter(torch.zeros(seq_len, seq_len).uniform_(-0.01, 0.01), requires_grad=True)
# Weight $b$ in $f_{W,b}(\cdot)$
#
# The paper notes that it's important to initialize bias to $1$.
self.bias = nn.Parameter(torch.ones(seq_len), requires_grad=True)
def forward(self, z: torch.Tensor, mask: Optional[torch.Tensor] = None):
"""
* `z` is the input $Z$ of shape `[seq_len, batch_size, d_z]`
* `mask` is a boolean mask of shape `[seq_len, seq_len, 1]` that controls the visibility of tokens
among each other. The last dimension of size `1` is the batch, which we have in other transformer
implementations and was left for compatibility.
"""
# Get sequence length
seq_len = z.shape[0]
# Split $Z$ into $Z_1$ and $Z_2$
z1, z2 = torch.chunk(z, 2, dim=-1)
# Check mask
if mask is not None:
# `mask` has shape `[seq_len_q, seq_len_k, batch_size]`.
# The batch dimension should be of size `1` because this implementation supports
# only same mask for all samples in the batch.
assert mask.shape[0] == 1 or mask.shape[0] == seq_len
assert mask.shape[1] == seq_len
# Here we only support the same mask for all samples
assert mask.shape[2] == 1
# Remove the batch dimension
mask = mask[:, :, 0]
# Normalize $Z_2$ before $f_{W,b}(\cdot)$
z2 = self.norm(z2)
# Get the weight matrix; truncate if larger than `seq_len`
weight = self.weight[:seq_len, :seq_len]
# Apply mask to the weights.
#
# If $W_{i,j}$ is $0$ then $f_{W,b}(Z_2)_i$ will not get any information
# from token $j$.
if mask is not None:
weight = weight * mask
# $f_{W,b}(Z_2) = W Z_2 + b$
z2 = torch.einsum('ij,jbd->ibd', weight, z2) + self.bias[:seq_len, None, None]
# $Z_1 \odot f_{W,b}(Z_2)$
return z1 * z2
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.LayerNorm",
"torch.einsum",
"torch.ones",
"torch.nn.GELU",
"torch.chunk"
] | 1.7 | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e |
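A minimal forward-pass sketch for the GMLPBlock above with made-up sizes; it assumes the class is importable as defined.
import torch

block = GMLPBlock(d_model=64, d_ffn=256, seq_len=10)
x = torch.randn(10, 4, 64)   # [seq_len, batch_size, d_model]
y = block(x=x)               # output has the same shape as x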
1.7 | """
---
title: Evaluate k-nearest neighbor language model
summary: >
This runs the kNN model and merges the kNN results with transformer output to
achieve better results than just using the transformer.
---
# Evaluate k-nearest neighbor language model
"""
from typing import Optional, List
import faiss
import numpy as np
import torch
from labml import monit, lab
from labml.logger import inspect
from labml_nn.transformers.knn.train_model import Configs
def knn(queries: torch.Tensor, index: faiss.IndexFlatL2, keys_store: np.ndarray, vals_store: np.ndarray, n_tokens: int):
"""
## $k$-NN to get $p(w_t, c_t)$
Here we refer to $f(\color{yellowgreen}{c_t})$ as queries,
$f(c_i)$ as keys and $w_i$ as values.
"""
# Save shape of queries to reshape results
queries_shape = queries.shape
# Flatten the `batch` and `sequence` dimensions of queries
queries = queries.view(-1, queries_shape[-1])
# Find 10 nearest neighbors of $f(\color{yellowgreen}{c_t})$ among $f(c_i)$.
# `distance` is the distance given by FAISS and `idx`, $i$ is the index of it in `keys_store`.
distance, idx = index.search(queries.numpy(), 10)
# Get $f(c_i)$
keys_found = queries.new_tensor(keys_store[idx])
# Get $w_i$
vals_found = torch.tensor(vals_store[idx]).squeeze(-1)
# We are going to calculate the cosine similarity between normalized vectors
# Normalize $f(c_i)$
keys_found_n = keys_found / torch.sqrt((keys_found ** 2).sum(-1, keepdims=True) + 1e-10)
# Normalize $f(\color{yellowgreen}{c_t})$
queries_n = queries / torch.sqrt((queries ** 2).sum(-1, keepdims=True) + 1e-10)
# Get the dot-product, or cosine similarity
dot_prod = (keys_found_n * queries_n.unsqueeze(1)).sum(-1)
# Token-wise logits
logits_token = dot_prod.new_zeros(queries.shape[0], n_tokens)
# Scatter and accumulate token logits based on the nearest neighbors
_ = logits_token.scatter_(dim=1, index=vals_found, src=dot_prod, reduce='add')
# Reshape the logits
logits_token = logits_token.reshape(queries_shape[0], queries_shape[1], -1)
return logits_token
def validation_loss(knn_weights: List[float], last_n: Optional[int], conf: Configs, index: faiss.IndexFlatL2,
keys_store: np.ndarray, vals_store: np.ndarray):
"""
## Calculate validation loss
We calculate the validation loss of the combined on $k$-NN prediction and transformer prediction.
The weight given to the $k$-NN model is given by `knn_weight`.
It's a list of weights and we calculate the validation loss for each.
"""
# List of losses for each `knn_weights`
losses = [[] for _ in knn_weights]
# Number of samples in each batch
n_samples = []
with torch.no_grad():
# Iterate through validation data
for i, batch in monit.enum("Validation", conf.validator.data_loader, is_children_silent=True):
# Get data and target labels
data, target = batch[0].to(conf.device), batch[1].to(conf.device)
# Run the model and get predictions $p(w_t, c_t)$
res = conf.model(data)
# Get $k$-NN predictions
res_knn = knn(conf.model.ff_input.cpu(), index, keys_store, vals_store, conf.n_tokens)
res_knn = res_knn.to(conf.device)
# This is to calculate only the loss for `last_n` tokens.
# This is important because the first predictions (along the sequence)
# of transformer model has very few past tokens to look at.
if last_n:
res = res[-last_n:]
res_knn = res_knn[-last_n:]
target = target[-last_n:]
# Number of samples
n_s = res.shape[0] * data.shape[1]
n_samples.append(n_s)
# Calculate scores for each of `knn_weights`.
for i, c in enumerate(knn_weights):
# Calculate the loss
loss = conf.loss_func(res_knn * c + (1 - c) * res, target)
losses[i].append(loss * n_s)
return losses, n_samples
def load_index(conf: Configs, n_probe: int = 8):
"""
## Load the index
"""
# Dimensions of $f(c_i)$
d_model = conf.transformer.d_model
# Training data loader
data_loader = conf.trainer.data_loader
# Number of contexts; i.e. number of tokens in the training data minus one.
# $\big(f(c_i), w_i\big)$ for $i \in [2, T]$
n_keys = data_loader.data.shape[0] * data_loader.data.shape[1] - 1
# Load FAISS index
with monit.section('Load index'):
index = faiss.read_index(str(lab.get_data_path() / 'faiss.index'))
# Set number of cells to probe
index.nprobe = n_probe
# Load memory mapped numpy arrays
keys_store = np.memmap(str(lab.get_data_path() / 'keys.npy'), dtype=np.float32, mode='r', shape=(n_keys, d_model))
vals_store = np.memmap(str(lab.get_data_path() / 'vals.npy'), dtype=np.int, mode='r', shape=(n_keys, 1))
return index, keys_store, vals_store
def main():
from labml_nn.transformers.knn.build_index import load_experiment
# Load the experiment. Replace the run uuid with you run uuid from
# [training the model](train_model.html).
conf = load_experiment('4984b85c20bf11eb877a69c1a03717cd')
# Set model to evaluation mode
conf.model.eval()
# Load index
index, keys_store, vals_store = load_index(conf)
# List of weights given to $k$-NN prediction. We will evaluate the validation loss for
# each of the weights
knn_weights = [i / 20 for i in range(10)]
# Evaluate validation loss
losses, n_samples = validation_loss(knn_weights, None, conf, index, keys_store, vals_store)
# Output the losses for each of `knn_weights`.
inspect({c: np.sum(losses[i]) / np.sum(n_samples) for i, c in enumerate(knn_weights)})
if __name__ == '__main__':
main()
| [
"torch.no_grad",
"torch.tensor"
] | 1.7 | BioGeek/annotated_deep_learning_paper_implementations | e2516cc3063cdfdf11cda05f22a10082297aa33e |
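A self-contained FAISS sketch with made-up data, mirroring the index type annotated in knn() above; in the experiment the real keys and values come from the memory-mapped stores built during training.
import faiss
import numpy as np

d_model = 16
keys = np.random.rand(1000, d_model).astype(np.float32)   # stand-ins for f(c_i)
index = faiss.IndexFlatL2(d_model)
index.add(keys)
queries = np.random.rand(5, d_model).astype(np.float32)   # stand-ins for f(c_t)
distance, idx = index.search(queries, 10)                  # 10 nearest neighbours per query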
1.7 | import torch
from algorithms.single_model_algorithm import SingleModelAlgorithm
from models.initializer import initialize_model
class GroupDRO(SingleModelAlgorithm):
"""
Group distributionally robust optimization.
Original paper:
@inproceedings{sagawa2019distributionally,
title={Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization},
author={Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy},
booktitle={International Conference on Learning Representations},
year={2019}
}
"""
def __init__(self, config, d_out, grouper, loss, metric, n_train_steps, is_group_in_train):
# check config
assert config.uniform_over_groups
# initialize model
model = initialize_model(config, d_out).to(config.device)
# initialize module
super().__init__(
config=config,
model=model,
grouper=grouper,
loss=loss,
metric=metric,
n_train_steps=n_train_steps,
)
# additional logging
self.logged_fields.append('group_weight')
# step size
self.group_weights_step_size = config.group_dro_step_size
# initialize adversarial weights
self.group_weights = torch.zeros(grouper.n_groups)
self.group_weights[is_group_in_train] = 1
self.group_weights = self.group_weights/self.group_weights.sum()
self.group_weights = self.group_weights.to(self.device)
def process_batch(self, batch):
"""
A helper function for update() and evaluate() that processes the batch
Args:
- batch (tuple of Tensors): a batch of data yielded by data loaders
Output:
- results (dictionary): information about the batch
- g (Tensor)
- y_true (Tensor)
- metadata (Tensor)
- loss (Tensor)
- metrics (Tensor)
all Tensors are of size (batch_size,)
"""
results = super().process_batch(batch)
results['group_weight'] = self.group_weights
return results
def objective(self, results):
"""
Takes an output of SingleModelAlgorithm.process_batch() and computes the
optimized objective. For group DRO, the objective is the weighted average
of losses, where groups have weights groupDRO.group_weights.
Args:
- results (dictionary): output of SingleModelAlgorithm.process_batch()
Output:
- objective (Tensor): optimized objective; size (1,).
"""
group_losses, _, _ = self.loss.compute_group_wise(
results['y_pred'],
results['y_true'],
results['g'],
self.grouper.n_groups,
return_dict=False)
return group_losses @ self.group_weights
def _update(self, results):
"""
Process the batch, update the log, and update the model, group weights, and scheduler.
Args:
- batch (tuple of Tensors): a batch of data yielded by data loaders
Output:
- results (dictionary): information about the batch, such as:
- g (Tensor)
- y_true (Tensor)
- metadata (Tensor)
- loss (Tensor)
- metrics (Tensor)
- objective (float)
"""
# compute group losses
group_losses, _, _ = self.loss.compute_group_wise(
results['y_pred'],
results['y_true'],
results['g'],
self.grouper.n_groups,
return_dict=False)
# update group weights
self.group_weights = self.group_weights * torch.exp(self.group_weights_step_size*group_losses.data)
self.group_weights = (self.group_weights/(self.group_weights.sum()))
# save updated group weights
results['group_weight'] = self.group_weights
# update model
super()._update(results)
| [
"torch.zeros",
"torch.exp"
] | 1.7.0 | KeAWang/wilds | 3b808a84bd477d7877b77675eec2953128a87033 |
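A standalone sketch with toy numbers of the exponentiated group-weight update in _update() above: weights shift toward the groups with the largest losses and are renormalized to a distribution.
import torch

group_weights = torch.tensor([0.5, 0.5])
group_losses = torch.tensor([1.0, 3.0])
step_size = 0.01
group_weights = group_weights * torch.exp(step_size * group_losses)
group_weights = group_weights / group_weights.sum()   # the higher-loss group now gets more weight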
1.7 | from torch import nn
class ConvolutionalBlock(nn.Module):
def __init__(self, in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=1, padding_mode='zeros'):
super().__init__()
self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride,
padding_mode=padding_mode)
self.bn1 = nn.BatchNorm1d(out_channels)
self.relu1 = nn.ReLU()
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
return out
| [
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.nn.Conv1d"
] | 1.7.1 | plutasnyy/mgr | 4ca5686ba7d62d0e2b8c172f17eb90bd822fdc21 |
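A minimal forward-pass sketch for the ConvolutionalBlock above (made-up sizes, not from the repository).
import torch

block = ConvolutionalBlock(in_channels=128, out_channels=256)
x = torch.randn(8, 128, 50)   # [batch, channels, sequence length]
y = block(x)                  # -> [8, 256, 50]; kernel 3 with padding 1 preserves the length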