Dataset Viewer
version | code | apis | full_version | repo_name | hexsha
---|---|---|---|---|---
stringclasses (21 values) | stringlengths (225-174k) | sequence | stringlengths (1-6) | stringlengths (10-107) | stringlengths (40)
version: 1.2
import os
from torch.utils.data import DataLoader
from continuum.datasets import CIFAR10, InMemoryDataset
from continuum.datasets import MNIST
import torchvision
from continuum.scenarios import TransformationIncremental
import pytest
import numpy as np
from continuum.transforms.bg_swap import BackgroundSwap
DATA_PATH = os.environ.get("CONTINUUM_DATA_PATH")
# Uncomment for debugging via image output
# import matplotlib.pyplot as plt
def test_bg_swap_fast():
"""
Fast test for background swap.
"""
bg_x = np.ones(shape=[2, 5, 5, 3]) * -1
bg_y = np.random.rand(2)
fg = np.random.normal(loc=.5, scale=.1, size=[5, 5])
bg = InMemoryDataset(bg_x, bg_y)
bg_swap = BackgroundSwap(bg, input_dim=(5, 5), normalize_bg=None)
spliced_1_channel = bg_swap(fg)[:, :, 0]
assert np.array_equal((spliced_1_channel <= -1), (fg <= .5))
@pytest.mark.slow
def test_background_swap_numpy():
"""
Test background swap on a single ndarray input.
"""
mnist = MNIST(DATA_PATH, download=True, train=True)
cifar = CIFAR10(DATA_PATH, download=True, train=True)
bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))
im = mnist.get_data()[0][0]
im = bg_swap(im)
# Uncomment for debugging
# plt.imshow(im, interpolation='nearest')
# plt.show()
@pytest.mark.slow
def test_background_swap_torch():
"""
Test background swap on a single tensor input.
"""
cifar = CIFAR10(DATA_PATH, download=True, train=True)
mnist = torchvision.datasets.MNIST(DATA_PATH, train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor()
]))
bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))
im = mnist[0][0]
im = bg_swap(im)
# Uncomment for debugging
# plt.imshow(im.permute(1, 2, 0), interpolation='nearest')
# plt.show()
@pytest.mark.slow
def test_background_transformation():
"""
Example code using TransformationIncremental to create a setting with 3 tasks.
"""
cifar = CIFAR10(DATA_PATH, train=True)
mnist = MNIST(DATA_PATH, download=False, train=True)
nb_task = 3
list_trsf = []
for i in range(nb_task):
list_trsf.append([torchvision.transforms.ToTensor(), BackgroundSwap(cifar, bg_label=i, input_dim=(28, 28)),
torchvision.transforms.ToPILImage()])
scenario = TransformationIncremental(mnist, base_transformations=[torchvision.transforms.ToTensor()],
incremental_transformations=list_trsf)
folder = "tests/samples/background_trsf/"
if not os.path.exists(folder):
os.makedirs(folder)
for task_id, task_data in enumerate(scenario):
task_data.plot(path=folder, title=f"background_{task_id}.jpg", nb_samples=100, shape=[28, 28, 3])
loader = DataLoader(task_data)
_, _, _ = next(iter(loader))
apis: ["torch.utils.data.DataLoader"] | full_version: 1.2.0 | repo_name: pclucas14/continuum | hexsha: 09034db1371e9646ca660fd4d4df73e61bf77067
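These continuum tests keep the dataset-downloading cases behind the custom `@pytest.mark.slow` marker and read the data root from `CONTINUUM_DATA_PATH`. A rough sketch of how such a suite is typically driven follows; the module path and data directory are assumptions, not part of the row.

```python
# Hypothetical driver: run only the fast test, or include the slow download tests.
import os
import pytest

os.environ.setdefault("CONTINUUM_DATA_PATH", "/tmp/continuum_data")  # assumed location

# Deselect tests marked @pytest.mark.slow (the MNIST/CIFAR10 download cases):
pytest.main(["-m", "not slow", "tests/test_bg_swap.py"])

# Or run the whole module, slow tests included:
# pytest.main(["tests/test_bg_swap.py"])
```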
1.8 | """Timer class based on the timeit.Timer class, but torch aware."""
import enum
import timeit
import textwrap
from typing import Any, Callable, Dict, List, NoReturn, Optional, Type, Union
import numpy as np
import torch
from torch.utils.benchmark.utils import common, cpp_jit
from torch.utils.benchmark.utils._stubs import TimerClass, TimeitModuleType
from torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface
__all__ = ["Timer", "timer", "Language"]
if torch.has_cuda and torch.cuda.is_available():
def timer() -> float:
torch.cuda.synchronize()
return timeit.default_timer()
else:
timer = timeit.default_timer
class Language(enum.Enum):
PYTHON = 0
CPP = 1
class CPPTimer:
def __init__(
self,
stmt: str,
setup: str,
timer: Callable[[], float],
globals: Dict[str, Any],
) -> None:
if timer is not timeit.default_timer:
raise NotImplementedError(
"PyTorch was built with CUDA and a GPU is present; however "
"Timer does not yet support GPU measurements. If your "
"code is CPU only, pass `timer=timeit.default_timer` to the "
"Timer's constructor to indicate this. (Note that this will "
"produce incorrect results if the GPU is in fact used, as "
"Timer will not synchronize CUDA.)"
)
if globals:
raise ValueError("C++ timing does not support globals.")
self._stmt: str = textwrap.dedent(stmt)
self._setup: str = textwrap.dedent(setup)
self._timeit_module: Optional[TimeitModuleType] = None
def timeit(self, number: int) -> float:
if self._timeit_module is None:
self._timeit_module = cpp_jit.compile_timeit_template(
self._stmt,
self._setup,
)
return self._timeit_module.timeit(number)
class Timer(object):
"""Helper class for measuring execution time of PyTorch statements.
For a full tutorial on how to use this class, see:
https://pytorch.org/tutorials/recipes/recipes/benchmark.html
The PyTorch Timer is based on `timeit.Timer` (and in fact uses
`timeit.Timer` internally), but with several key differences:
1) Runtime aware:
Timer will perform warmups (important as some elements of PyTorch are
lazily initialized), set threadpool size so that comparisons are
apples-to-apples, and synchronize asynchronous CUDA functions when
necessary.
2) Focus on replicates:
When measuring code, and particularly complex kernels / models,
run-to-run variation is a significant confounding factor. It is
expected that all measurements should include replicates to quantify
noise and allow median computation, which is more robust than mean.
To that effect, this class deviates from the `timeit` API by
conceptually merging `timeit.Timer.repeat` and `timeit.Timer.autorange`.
(Exact algorithms are discussed in method docstrings.) The `timeit`
method is replicated for cases where an adaptive strategy is not
desired.
3) Optional metadata:
When defining a Timer, one can optionally specify `label`, `sub_label`,
`description`, and `env`. (Defined later) These fields are included in
the representation of the result object and used by the `Compare` class
to group and display results for comparison.
4) Instruction counts
In addition to wall times, Timer can run a statement under Callgrind
and report instructions executed.
Directly analogous to `timeit.Timer` constructor arguments:
`stmt`, `setup`, `timer`, `globals`
PyTorch Timer specific constructor arguments:
`label`, `sub_label`, `description`, `env`, `num_threads`
Args:
stmt: Code snippet to be run in a loop and timed.
setup: Optional setup code. Used to define variables used in `stmt`
timer:
Callable which returns the current time. If PyTorch was built
without CUDA or there is no GPU present, this defaults to
`timeit.default_timer`; otherwise it will synchronize CUDA before
measuring the time.
globals:
A dict which defines the global variables when `stmt` is being
executed. This is the other method for providing variables which
`stmt` needs.
label:
String which summarizes `stmt`. For instance, if `stmt` is
"torch.nn.functional.relu(torch.add(x, 1, out=out))"
one might set label to "ReLU(x + 1)" to improve readability.
sub_label:
Provide supplemental information to disambiguate measurements
with identical stmt or label. For instance, in our example
above sub_label might be "float" or "int", so that it is easy
to differentiate:
"ReLU(x + 1): (float)"
"ReLU(x + 1): (int)"
when printing Measurements or summarizing using `Compare`.
description:
String to distinguish measurements with identical label and
sub_label. The principal use of `description` is to signal to
`Compare` the columns of data. For instance one might set it
based on the input size to create a table of the form: ::
| n=1 | n=4 | ...
------------- ...
ReLU(x + 1): (float) | ... | ... | ...
ReLU(x + 1): (int) | ... | ... | ...
using `Compare`. It is also included when printing a Measurement.
env:
This tag indicates that otherwise identical tasks were run in
different environments, and are therefore not equivalent, for
instance when A/B testing a change to a kernel. `Compare` will
treat Measurements with different `env` specification as distinct
when merging replicate runs.
num_threads:
The size of the PyTorch threadpool when executing `stmt`. Single
threaded performance is important as both a key inference workload
and a good indicator of intrinsic algorithmic efficiency, so the
default is set to one. This is in contrast to the default PyTorch
threadpool size which tries to utilize all cores.
"""
_timer_cls: Type[TimerClass] = timeit.Timer
def __init__(
self,
stmt: str = "pass",
setup: str = "pass",
timer: Callable[[], float] = timer,
globals: Optional[Dict[str, Any]] = None,
label: Optional[str] = None,
sub_label: Optional[str] = None,
description: Optional[str] = None,
env: Optional[str] = None,
num_threads: int = 1,
language: Union[Language, str] = Language.PYTHON,
):
if not isinstance(stmt, str):
raise ValueError("Currently only a `str` stmt is supported.")
# We copy `globals` to prevent mutations from leaking.
# (For instance, `eval` adds the `__builtins__` key)
self._globals = dict(globals or {})
if language in (Language.PYTHON, "py", "python"):
# Include `torch` if not specified as a convenience feature.
self._globals.setdefault("torch", torch)
self._language: Language = Language.PYTHON
elif language in (Language.CPP, "cpp", "c++"):
assert self._timer_cls is timeit.Timer, "_timer_cls has already been swapped."
self._timer_cls = CPPTimer
setup = ("" if setup == "pass" else setup)
self._language = Language.CPP
else:
raise ValueError(f"Invalid language `{language}`.")
# Convenience adjustment so that multi-line code snippets defined in
# functions do not IndentationError (Python) or look odd (C++). The
# leading newline removal is for the initial newline that appears when
# defining block strings. For instance:
# textwrap.dedent("""
# print("This is a stmt")
# """)
# produces '\nprint("This is a stmt")\n'.
#
# Stripping this down to 'print("This is a stmt")' doesn't change
# what gets executed, but it makes __repr__'s nicer.
stmt = textwrap.dedent(stmt)
stmt = (stmt[1:] if stmt and stmt[0] == "\n" else stmt).rstrip()
setup = textwrap.dedent(setup)
setup = (setup[1:] if setup and setup[0] == "\n" else setup).rstrip()
self._timer = self._timer_cls(
stmt=stmt,
setup=setup,
timer=timer,
globals=valgrind_timer_interface.CopyIfCallgrind.unwrap_all(self._globals),
)
self._task_spec = common.TaskSpec(
stmt=stmt,
setup=setup,
label=label,
sub_label=sub_label,
description=description,
env=env,
num_threads=num_threads,
)
def timeit(self, number: int = 1000000) -> common.Measurement:
"""Mirrors the semantics of timeit.Timer.timeit().
Execute the main statement (`stmt`) `number` times.
https://docs.python.org/3/library/timeit.html#timeit.Timer.timeit
"""
with common.set_torch_threads(self._task_spec.num_threads):
# Warmup
self._timer.timeit(number=max(int(number // 100), 1))
return common.Measurement(
number_per_run=number,
raw_times=[self._timer.timeit(number=number)],
task_spec=self._task_spec
)
def repeat(self, repeat: int = -1, number: int = -1) -> None:
raise NotImplementedError("See `Timer.blocked_autorange.`")
def autorange(self, callback: Optional[Callable[[int, float], NoReturn]] = None) -> None:
raise NotImplementedError("See `Timer.blocked_autorange.`")
def _threaded_measurement_loop(
self,
number: int,
time_hook: Callable[[], float],
stop_hook: Callable[[List[float]], bool],
min_run_time: float,
max_run_time: Optional[float] = None,
callback: Optional[Callable[[int, float], NoReturn]] = None
) -> List[float]:
total_time = 0.0
can_stop = False
times: List[float] = []
with common.set_torch_threads(self._task_spec.num_threads):
while (total_time < min_run_time) or (not can_stop):
time_spent = time_hook()
times.append(time_spent)
total_time += time_spent
if callback:
callback(number, time_spent)
can_stop = stop_hook(times)
if max_run_time and total_time > max_run_time:
break
return times
def _estimate_block_size(self, min_run_time: float) -> int:
with common.set_torch_threads(self._task_spec.num_threads):
# Estimate the block size needed for measurement to be negligible
# compared to the inner loop. This also serves as a warmup.
overhead = np.median([self._timer.timeit(0) for _ in range(5)])
number = 1
while True:
time_taken = self._timer.timeit(number)
relative_overhead = overhead / time_taken
if relative_overhead <= 1e-4 and time_taken >= min_run_time / 1000:
break
if time_taken > min_run_time:
break
number *= 10
return number
def adaptive_autorange(
self,
threshold: float = 0.1,
*,
min_run_time: float = 0.01,
max_run_time: float = 10.0,
callback: Optional[Callable[[int, float], NoReturn]] = None,
) -> common.Measurement:
number = self._estimate_block_size(min_run_time=0.05)
def time_hook() -> float:
return self._timer.timeit(number)
def stop_hook(times: List[float]) -> bool:
if len(times) > 3:
return common.Measurement(
number_per_run=number,
raw_times=times,
task_spec=self._task_spec
).meets_confidence(threshold=threshold)
return False
times = self._threaded_measurement_loop(
number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback)
return common.Measurement(
number_per_run=number,
raw_times=times,
task_spec=self._task_spec
)
def blocked_autorange(
self,
callback: Optional[Callable[[int, float], NoReturn]] = None,
min_run_time: float = 0.2,
) -> common.Measurement:
"""Measure many replicates while keeping timer overhead to a minimum.
At a high level, blocked_autorange executes the following pseudo-code::
`setup`
total_time = 0
while total_time < min_run_time
start = timer()
for _ in range(block_size):
`stmt`
total_time += (timer() - start)
Note the variable `block_size` in the inner loop. The choice of block
size is important to measurement quality, and must balance two
competing objectives:
1) A small block size results in more replicates and generally
better statistics.
2) A large block size better amortizes the cost of `timer`
invocation, and results in a less biased measurement. This is
important because CUDA synchronization time is non-trivial
(order single to low double digit microseconds) and would
otherwise bias the measurement.
blocked_autorange sets block_size by running a warmup period,
increasing block size until timer overhead is less than 0.1% of
the overall computation. This value is then used for the main
measurement loop.
Returns:
A `Measurement` object that contains measured runtimes and
repetition counts, and can be used to compute statistics.
(mean, median, etc.)
"""
number = self._estimate_block_size(min_run_time)
def time_hook() -> float:
return self._timer.timeit(number)
def stop_hook(times: List[float]) -> bool:
return True
times = self._threaded_measurement_loop(
number, time_hook, stop_hook,
min_run_time=min_run_time,
callback=callback)
return common.Measurement(
number_per_run=number,
raw_times=times,
task_spec=self._task_spec
)
def collect_callgrind(
self,
number: int = 100,
collect_baseline: bool = True
) -> valgrind_timer_interface.CallgrindStats:
"""Collect instruction counts using Callgrind.
Unlike wall times, instruction counts are deterministic
(modulo non-determinism in the program itself and small amounts of
jitter from the Python interpreter.) This makes them ideal for detailed
performance analysis. This method runs `stmt` in a separate process
so that Valgrind can instrument the program. Performance is severely
degraded due to the instrumentation, however this is ameliorated by
the fact that a small number of iterations is generally sufficient to
obtain good measurements.
In order to use this method `valgrind`, `callgrind_control`, and
`callgrind_annotate` must be installed.
Because there is a process boundary between the caller (this process)
and the `stmt` execution, `globals` cannot contain arbitrary in-memory
data structures. (Unlike timing methods.) Instead, globals are
restricted to builtins, `nn.Module`s, and TorchScripted functions/modules
to reduce the surprise factor from serialization and subsequent
deserialization. The `GlobalsBridge` class provides more detail on this
subject. Take particular care with nn.Modules: they rely on pickle and
you may need to add an import to `setup` for them to transfer properly.
By default, a profile for an empty statement will be collected and
cached to indicate how many instructions are from the Python loop which
drives `stmt`.
Returns:
A `CallgrindStats` object which provides instruction counts and
some basic facilities for analyzing and manipulating results.
"""
if not isinstance(self._task_spec.stmt, str):
raise ValueError("`collect_callgrind` currently only supports string `stmt`")
# Check that the statement is valid. It doesn't guarantee success, but it's much
# simpler and quicker to raise an exception for a faulty `stmt` or `setup` in
# the parent process rather than the valgrind subprocess.
self._timer.timeit(1)
is_python = (self._language == Language.PYTHON)
assert is_python or not self._globals
return valgrind_timer_interface.wrapper_singleton().collect_callgrind(
task_spec=self._task_spec,
globals=self._globals,
number=number,
collect_baseline=collect_baseline and is_python,
is_python=is_python)
apis: ["torch.cuda.synchronize", "torch.utils.benchmark.utils.common.TaskSpec", "torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.wrapper_singleton", "torch.utils.benchmark.utils.common.set_torch_threads", "torch.cuda.is_available", "torch.utils.benchmark.utils.common.Measurement", "torch.utils.benchmark.utils.valgrind_wrapper.timer_interface.CopyIfCallgrind.unwrap_all", "torch.utils.benchmark.utils.cpp_jit.compile_timeit_template"] | full_version: 1.8.1 | repo_name: GOOGLE-M/SGC | hexsha: 78ad8d02b80808302e38559e2d0f430f66a809bd
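Since this row is the `torch.utils.benchmark` Timer itself, a short usage sketch of the documented API may help; the statement, sizes, and labels below are illustrative only.

```python
import torch
from torch.utils.benchmark import Timer

x = torch.randn(1024, 1024)

t = Timer(
    stmt="torch.nn.functional.relu(torch.add(x, 1))",
    globals={"x": x},
    label="ReLU(x + 1)",                 # groups rows when results are compared
    sub_label="float32, 1024x1024",
    description="eager",
    num_threads=1,
)

m = t.blocked_autorange(min_run_time=0.2)   # picks a block size, then collects replicates
print(m)                                     # Measurement with median/IQR over the raw times
```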
version: 1.1
from .single_stage import SingleStageDetector
from ..registry import DETECTORS
from mmdet.core import bbox2result
import torch.nn as nn
import torch
from .. import builder
import numpy as np
import cv2
from mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler
@DETECTORS.register_module
class CSP(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
refine_roi_extractor=None,
refine_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
detached=True,
return_feature_maps=False):
super(CSP, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
if refine_head is not None:
self.refine_roi_extractor = builder.build_roi_extractor(
refine_roi_extractor)
self.refine_head = builder.build_head(refine_head)
self.return_feature_maps = return_feature_maps
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.detached = detached
def show_input_debug(self, img, classification_maps, scale_maps, offset_maps):
img_numpy = img.cpu().numpy().copy()[0]
# img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]
img_numpy = np.transpose(img_numpy, [1, 2, 0]) + [102.9801, 115.9465, 122.7717]
img_numpy = img_numpy[:, :, ::-1]
img_numpy = img_numpy.astype(np.uint8)
strides = [8, 16, 32, 64, 128]
img_nows = []
for i, stride in enumerate(strides):
img_now = img_numpy.copy()
# cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][2]
cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][:80]
scale_numpy = scale_maps[0][i].cpu().numpy().copy()[0][0] * stride
offset_numpy = offset_maps[0][i].cpu().numpy().copy()[0][:2]
cs, ys, xs = cls_numpy.nonzero()
print(len(ys))
for c, x, y in zip(cs, xs, ys):
cv2.imshow(str(c), classification_maps[0][i].cpu().numpy().copy()[0][80+c])
realx = x
realy = y
height = scale_numpy[y, x]
realy = realy + 0.5 + offset_numpy[0][y, x]
realx = realx + 0.5 + offset_numpy[1][y, x]
realy = realy * stride
realx = realx * stride
top_y = int(realy - height/2)
top_x = int(realx)
down_y = int(realy + height/2)
down_x = int(realx)
top_left = (int(top_x - height * 0.1), int(top_y))
down_right = (int(down_x + height * 0.1), down_y)
cv2.rectangle(img_now, top_left, down_right, (255, 255, 5*int(c)), 2)
img_nows.append(img_now)
cv2.imshow(str(i) +'img', img_now)
cv2.waitKey(0)
def show_input_debug_caltech(self, img, classification_maps, scale_maps, offset_maps):
for j in range(img.shape[0]):
img_numpy = img.cpu().numpy().copy()[j]
img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]
img_numpy = img_numpy[:, :, ::-1]
img_numpy = img_numpy.astype(np.uint8)
strides = [4]
img_nows = []
for i, stride in enumerate(strides):
img_now = img_numpy.copy()
cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]
ignore_numpy = classification_maps[j][i].cpu().numpy().copy()[0][1]
cv2.imshow('ignore', ignore_numpy)
scale_numpy = scale_maps[j][i].cpu().numpy().copy()[0][0] * stride
offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]
ys, xs = cls_numpy.nonzero()
print(len(ys))
for x, y in zip(xs, ys):
# cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][c])
realx = x
realy = y
height = scale_numpy[y, x]
realy = realy + 0.5 + offset_numpy[0][y, x]
realx = realx + 0.5 + offset_numpy[1][y, x]
realy = realy * stride
realx = realx * stride
top_y = int(realy - height/2)
top_x = int(realx)
down_y = int(realy + height/2)
down_x = int(realx)
top_left = (int(top_x - height * 0.1), int(top_y))
down_right = (int(down_x + height * 0.1), down_y)
cv2.rectangle(img_now, top_left, down_right, (255, 255, 125), 2)
img_nows.append(img_now)
cv2.imshow(str(i) +'img', img_now)
cv2.waitKey(0)
def show_input_debug_head(self, img, classification_maps, scale_maps, offset_maps):
for j in range(img.shape[0]):
img_numpy = img.cpu().numpy().copy()[j]
img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]
img_numpy = img_numpy[:, :, ::-1]
img_numpy = img_numpy.astype(np.uint8)
strides = [4]
img_nows = []
for i, stride in enumerate(strides):
img_now = img_numpy.copy()
cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]
ignore_numpy = classification_maps[j][i].cpu().numpy().copy()[0][1]
cv2.imshow('ignore', ignore_numpy)
scale_numpy = scale_maps[j][i].exp().cpu().numpy().copy()[0][0] * stride
offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]
ys, xs = cls_numpy.nonzero()
for x, y in zip(xs, ys):
# cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][c])
realx = x
realy = y
height = scale_numpy[y, x]
realy = realy + 0.5 + offset_numpy[0][y, x]
realx = realx + 0.5 + offset_numpy[1][y, x]
realy = realy * stride
realx = realx * stride
top_y = int(realy)
top_x = int(realx)
down_y = int(realy + height)
down_x = int(realx)
top_left = (int(top_x - height * 0.41/2), int(top_y))
down_right = (int(down_x + height * 0.41/2), down_y)
cv2.rectangle(img_now, top_left, down_right, (255, 255, 125), 2)
img_nows.append(img_now)
cv2.imshow(str(i) +'img', img_now)
cv2.waitKey(0)
def show_mot_input_debug(self, img, classification_maps, scale_maps, offset_maps):
for j in range(img.shape[0]):
img_numpy = img.cpu().numpy().copy()[j]
img_numpy = np.transpose(img_numpy, [1, 2, 0]) * [58.395, 57.12, 57.375] + [123.675, 116.28, 103.53]
# img_numpy = np.transpose(img_numpy, [1, 2, 0]) + [102.9801, 115.9465, 122.7717]
img_numpy = img_numpy[:, :, ::-1]
img_numpy = img_numpy.astype(np.uint8)
strides = [4]
img_nows = []
for i, stride in enumerate(strides):
img_now = img_numpy.copy()
# cls_numpy = classification_maps[0][i].cpu().numpy().copy()[0][2]
cls_numpy = classification_maps[j][i].cpu().numpy().copy()[0][2]
instance_numpy = classification_maps[j][i].cpu().numpy().copy()[0][3]
scale_numpy = scale_maps[j][i].cpu().numpy().copy()[0][0] * stride
offset_numpy = offset_maps[j][i].cpu().numpy().copy()[0][:2]
ys, xs = cls_numpy.nonzero()
for x, y in zip(xs, ys):
c=0
cv2.imshow(str(c), classification_maps[j][i].cpu().numpy().copy()[0][2])
realx = x
realy = y
height = scale_numpy[y, x]
realy = realy + 0.5 + offset_numpy[0][y, x]
realx = realx + 0.5 + offset_numpy[1][y, x]
realy = realy * stride
realx = realx * stride
top_y = int(realy - height/2)
top_x = int(realx)
down_y = int(realy + height/2)
down_x = int(realx)
top_left = (int(top_x - height * 0.1), int(top_y))
down_right = (int(down_x + height * 0.1), down_y)
cv2.rectangle(img_now, top_left, down_right, (255, 255, 5*int(c)), 2)
instance = instance_numpy[y, x]
cv2.putText(img_now, str(instance), top_left, cv2.FONT_HERSHEY_COMPLEX, 1, 255)
img_nows.append(img_now)
cv2.imshow(str(i) +'img', img_now)
cv2.waitKey(0)
@property
def refine(self):
return hasattr(self, 'refine_head') and self.refine_head is not None
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
classification_maps=None,
scale_maps=None,
offset_maps=None):
# for tracking data which batch is produced by dataset instead of data loader
if type(img) == list:
img=img[0]
img_metas=img_metas[0]
gt_bboxes=gt_bboxes[0]
gt_labels=gt_labels[0]
gt_bboxes_ignore = gt_bboxes_ignore[0]
classification_maps = classification_maps[0]
scale_maps = scale_maps[0]
offset_maps = offset_maps[0]
losses = dict()
x = self.extract_feat(img)
# self.show_input_debug(img, classification_maps, scale_maps, offset_maps)
# self.show_input_debug_caltech(img, classification_maps, scale_maps, offset_maps)
# self.show_mot_input_debug(img, classification_maps, scale_maps, offset_maps)
# self.show_input_debug_head(img, classification_maps, scale_maps, offset_maps)
outs = self.bbox_head(x)
loss_inputs = outs + (gt_bboxes, gt_labels, classification_maps, scale_maps, offset_maps, img_metas, self.train_cfg.csp_head if self.refine else self.train_cfg)
losses_bbox = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(losses_bbox)
if self.refine:
if self.detached:
x = tuple([i.detach() for i in x])
bbox_inputs = outs + (img_metas, self.train_cfg.csp_head, False)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, no_strides=False) # no_strides to not upscale yet
bbox_list = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)[0]
for det_bboxes, det_labels in bbox_list
]
bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
bbox_sampler = build_sampler(
self.train_cfg.rcnn.sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
if bbox_list[i].shape[0] == 0 or gt_bboxes[i].shape[0] == 0:
continue
bbox = torch.tensor(bbox_list[i]).float().cuda()
assign_result = bbox_assigner.assign(
bbox, gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
bbox,
gt_bboxes[i],
gt_labels[i])
sampling_results.append(sampling_result)
samp_list = [res.bboxes for res in sampling_results]
if len(samp_list) == 0:
losses.update(dict(loss_refine_cls=torch.tensor(0).float().cuda(), acc=torch.tensor(0).float().cuda()))
return losses
rois = bbox2roi(samp_list).float()
if self.refine_head.loss_opinion is not None:
pred_scores = torch.cat([torch.tensor(bbox[:, 4]).float().cuda() for bbox in bbox_list], dim=0)
pred_rois = bbox2roi([torch.tensor(bbox).float().cuda() for bbox in bbox_list])
pred_feats = self.refine_roi_extractor(
x, pred_rois)
pred_scores_refine = self.refine_head(pred_feats)
loss_opinion = self.refine_head.compute_opinion_loss(pred_scores, pred_scores_refine)
losses.update(loss_opinion)
bbox_feats = self.refine_roi_extractor(
x, rois)
cls_score = self.refine_head(bbox_feats)
bbox_targets = self.refine_head.get_target(
sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn)
loss_refine = self.refine_head.loss(cls_score,
*bbox_targets[:2])
losses.update(dict(loss_refine_cls=loss_refine["loss_cls"], distL1=loss_refine["dist"]))
return losses
def simple_test_accuracy(self, img, img_meta):
gts = img_meta[0]["gts"]
x = self.extract_feat(img)
if self.detached:
x = (x[0].detach(),)
rois = bbox2roi(gts)
if rois.shape[0] == 0:
return 0, 0
roi_feats = self.refine_roi_extractor(
x, rois)
cls_score = self.refine_head.get_scores(roi_feats)
return (cls_score > 0.5).float().sum(), rois.size(0)
def simple_test(self, img, img_meta, rescale=False, return_id=False):
x = self.extract_feat(img)
outs = self.bbox_head(x)
bbox_inputs = outs + (img_meta, self.test_cfg.csp_head if self.refine else self.test_cfg, False) # TODO: Handle rescaling
if self.return_feature_maps:
return self.bbox_head.get_bboxes_features(*bbox_inputs)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, no_strides=False)
im_scale = img_meta[0]["scale_factor"]
if "id" in img_meta[0]:
img_id = img_meta[0]["id"]
else:
img_id = 0
if self.refine:
if self.detached:
x = (x[0].detach(),)
bbox_list = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)[0]
for det_bboxes, det_labels in bbox_list
]
refine_cfg = self.test_cfg.get('rcnn', None)
bbox_list = [torch.tensor(bbox).float().cuda() for bbox in bbox_list]
rois = bbox2roi(bbox_list)
bbox_list = [bbox/im_scale for bbox in bbox_list]
if rois.shape[0] == 0:
cls_score = None
else:
roi_feats = self.refine_roi_extractor(
x, rois)
cls_score = self.refine_head.get_scores(roi_feats)
res_buffer = []
if cls_score is not None:
if refine_cfg is not None:
res_buffer = self.refine_head.suppress_boxes(rois, cls_score, img_meta, cfg=refine_cfg)
else:
res_buffer = self.refine_head.combine_scores(bbox_list, cls_score)
if return_id:
return res_buffer, img_id
return res_buffer
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
if return_id:
return bbox_results[0], img_id
return bbox_results[0]
def foward_features(self, features):
bbox_list = self.bbox_head.get_bboxes(*features)
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results[0]
apis: ["torch.tensor"] | full_version: 1.1 | repo_name: mohammedshariqnawaz/Pedestron | hexsha: 9785feb94f00e07ae24a662525b4678f12d0fdc8
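All the `show_*_debug` helpers in this CSP detector decode boxes the same way: a non-zero center-map cell plus the offset map gives a sub-pixel center, the scale map gives a height, and the width is a fixed fraction of the height. A standalone sketch of that decoding (the 0.41 ratio and the top-anchored box come from `show_input_debug_head`; the toy maps are made up):

```python
import numpy as np

def decode_center_scale(cls_map, scale_map, offset_map, stride=4, ratio=0.41):
    """Decode (x1, y1, x2, y2) boxes from CSP-style center/scale/offset maps."""
    boxes = []
    ys, xs = cls_map.nonzero()                    # activated center cells
    for x, y in zip(xs, ys):
        height = scale_map[y, x] * stride         # scale map holds height / stride
        cy = (y + 0.5 + offset_map[0, y, x]) * stride
        cx = (x + 0.5 + offset_map[1, y, x]) * stride
        w = height * ratio
        # top-anchored box, as drawn in show_input_debug_head
        boxes.append([cx - w / 2, cy, cx + w / 2, cy + height])
    return np.array(boxes)

# Toy 8x8 maps with a single activated center:
cls = np.zeros((8, 8)); cls[3, 5] = 1.0
scale = np.full((8, 8), 12.0)
offset = np.zeros((2, 8, 8))
print(decode_center_scale(cls, scale, offset))
```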
version: 1.0
import torch
from torch import nn
from torch.distributions import MultivariateNormal
class Normal(nn.Module):
def __init__(self, num_vars=100):
super(Normal, self).__init__()
self.num_vars = num_vars
self.means = nn.Parameter(torch.zeros(num_vars))
self.std = nn.Parameter(torch.eye(num_vars))
def log_prob(self, x):
distr = MultivariateNormal(self.means, self.std)
return distr.log_prob(x)
def sample(self, num_samples):
distr = MultivariateNormal(self.means, self.std)
return distr.sample_n(num_samples)
apis: ["torch.zeros", "torch.distributions.MultivariateNormal", "torch.eye"] | full_version: 1.0.1 | repo_name: insilicomedicine/TRIP | hexsha: 5e7b9da298aa47a71c71e1144ff1d8e538dbccaa
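A brief usage sketch of this `Normal` prior (assuming the class above is in scope): both the mean vector and the matrix handed to `MultivariateNormal` as its covariance are `nn.Parameter`s, so `log_prob` is differentiable with respect to them; `sample_n` is the older spelling of `sample((n,))`.

```python
prior = Normal(num_vars=4)       # class from this row
z = prior.sample(8)              # (8, 4) draws, no gradient through sampling
lp = prior.log_prob(z)           # (8,) log-densities, differentiable w.r.t. means/std
lp.sum().backward()
print(prior.means.grad.shape)    # torch.Size([4])
```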
version: 1.0
import torch
import torch.nn as nn
from torch import autograd
import torch.optim as optim
from ...utils import TrainStats
class WGAN(nn.Module):
def __init__(self, gen, discr, prior, n_critic=5, gamma=1, gp=True,
device='cpu'):
super(WGAN, self).__init__()
self.gen = gen
self.discr = discr
self.prior = prior
self.gamma = gamma
self.n_critic = n_critic
self.gp = gp
self.device = device
def get_losses(self, x, compute_reinforce=False):
# get generator samples
sampled_latents = self.prior.sample(x.shape[0])
sampled_latents = sampled_latents.detach()
sampled_images = self.gen(sampled_latents)
# get discriminator outputs
real_discr = self.discr(x)
fake_discr = self.discr(sampled_images)
# compute gradient penalties
if self.gp:
alphas = torch.rand(x.shape[0], 1, 1, 1).repeat(1, x.shape[1],
x.shape[2],
x.shape[3])
alphas = alphas.to(self.device)
int_points = alphas * sampled_images + (1 - alphas) * x
int_points_discr = self.discr(int_points)
gradients = autograd.grad(outputs=int_points_discr, inputs=int_points,
grad_outputs=torch.ones(
int_points_discr.size()).to(self.device),
create_graph=True, retain_graph=True,
only_inputs=True)[0]
grad_norm = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
# compute reinforce loss
if compute_reinforce:
rews = (fake_discr - fake_discr.mean()).detach()
rews = rews / rews.std()
lp_loss = -(rews * self.prior.log_prob(sampled_latents)).mean()
else:
lp_loss = torch.zeros(1).mean()
# compute losses
gen_loss = -fake_discr.mean()
discr_loss = -(
real_discr.mean() - fake_discr.mean())
if self.gp:
discr_loss = discr_loss + self.gamma * grad_norm
return gen_loss, \
discr_loss, \
lp_loss, \
{
'gen_loss': gen_loss.detach().cpu().numpy(),
'discr_loss': discr_loss.detach().cpu().numpy(),
'lp_loss': lp_loss.detach().cpu().numpy(),
'grad_norm': grad_norm.detach().cpu().numpy()
}
def make_training(self, train_loader, global_stats=None, num_iterations=20000, verbose_step=50,
train_lp=True, lr=1e-4, lp_lr=1e-4):
gen_optimizer = optim.Adam(self.gen.parameters(), lr=lr, betas=(0.5, .9))
discr_optimizer = optim.Adam(self.discr.parameters(), lr=lr,
betas=(0.5, .9))
lp_optimizer = optim.Adam(self.prior.parameters(), lr=lp_lr)
local_stats = TrainStats()
cur_iteration = 0
epoch_i = 0
while cur_iteration < num_iterations:
i = 0
print("Epoch", epoch_i, ":")
for x_batch, _ in train_loader:
x_batch = x_batch.to(self.device)
print("!", end='')
i += 1
gen_loss, discr_loss, lp_loss, cur_stats = self.get_losses(
x_batch, (i % self.n_critic == 0) and train_lp)
local_stats.update(cur_stats)
if global_stats is not None:
global_stats.update(cur_stats)
if i % self.n_critic == 0:
gen_optimizer.zero_grad()
gen_loss.backward()
gen_optimizer.step()
if train_lp:
lp_optimizer.zero_grad()
lp_loss.backward()
lp_optimizer.step()
self.prior.stabilize()
else:
discr_optimizer.zero_grad()
discr_loss.backward()
discr_optimizer.step()
cur_iteration += 1
if cur_iteration >= num_iterations:
break
if i % verbose_step == 0:
local_stats.print()
local_stats.reset()
i = 0
epoch_i += 1
if i > 0:
local_stats.print()
local_stats.reset()
return global_stats
def sample(self, num_samples):
z = self.prior.sample(num_samples)
samples = self.gen(z)
return samples.detach().cpu().numpy()
apis: ["torch.zeros", "torch.rand"] | full_version: 1.0.1 | repo_name: insilicomedicine/TRIP | hexsha: 5e7b9da298aa47a71c71e1144ff1d8e538dbccaa
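The heart of `get_losses` above is the WGAN-GP term: score random interpolates between real and generated samples with the critic, then penalize how far the gradient norm is from 1. A self-contained sketch of just that term, with a stand-in linear critic and toy 1x4x4 inputs (everything here is illustrative, not the repository's models):

```python
import torch
from torch import autograd, nn

critic = nn.Linear(16, 1)                    # stand-in critic over flattened 1x4x4 inputs
real = torch.rand(8, 1, 4, 4)
fake = torch.rand(8, 1, 4, 4)

# Random interpolation points between real and fake, as in WGAN.get_losses.
alphas = torch.rand(8, 1, 1, 1).expand_as(real)
int_points = (alphas * fake + (1 - alphas) * real).requires_grad_(True)
int_scores = critic(int_points.view(int_points.size(0), -1))

gradients = autograd.grad(outputs=int_scores, inputs=int_points,
                          grad_outputs=torch.ones_like(int_scores),
                          create_graph=True, retain_graph=True,
                          only_inputs=True)[0]

# Penalize deviation of the gradient norm from 1 (weighted by gamma in the class).
grad_norm = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
print(grad_norm.item())
```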
version: 1.8
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as td
class Flow(nn.Module):
"""
Building both normalizing flows and neural flows.
Example:
>>> import stribor as st
>>> torch.manual_seed(123)
>>> dim = 2
>>> flow = st.Flow(st.UnitNormal(dim), [st.Affine(dim)])
>>> x = torch.rand(1, dim)
>>> y, ljd = flow(x)
>>> y_inv, ljd_inv = flow.inverse(y)
Args:
base_dist (Type[torch.distributions]): Base distribution
transforms (List[st.flows]): List of invertible transformations
"""
def __init__(self, base_dist=None, transforms=[]):
super().__init__()
self.base_dist = base_dist
self.transforms = nn.ModuleList(transforms)
def forward(self, x, latent=None, mask=None, t=None, reverse=False, **kwargs):
"""
Args:
x (tensor): Input sampled from base density with shape (..., dim)
latent (tensor, optional): Conditional vector with shape (..., latent_dim)
Default: None
mask (tensor): Masking tensor with shape (..., 1)
Default: None
t (tensor, optional): Flow time end point. Default: None
reverse (bool, optional): Whether to perform an inverse. Default: False
Returns:
y (tensor): Output that follows target density (..., dim)
log_jac_diag (tensor): Log-Jacobian diagonal (..., dim)
"""
transforms = self.transforms[::-1] if reverse else self.transforms
_mask = 1 if mask is None else mask
log_jac_diag = torch.zeros_like(x).to(x)
for f in transforms:
if reverse:
x, ld = f.inverse(x * _mask, latent=latent, mask=mask, t=t, **kwargs)
else:
x, ld = f.forward(x * _mask, latent=latent, mask=mask, t=t, **kwargs)
log_jac_diag += ld * _mask
return x, log_jac_diag
def inverse(self, y, latent=None, mask=None, t=None, **kwargs):
""" Inverse of forward function with the same arguments. """
return self.forward(y, latent=latent, mask=mask, t=t, reverse=True, **kwargs)
def log_prob(self, x, **kwargs):
"""
Calculates log-probability of a sample.
Args:
x (tensor): Input with shape (..., dim)
Returns:
log_prob (tensor): Log-probability of the input with shape (..., 1)
"""
if self.base_dist is None:
raise ValueError('Please define `base_dist` if you need log-probability')
x, log_jac_diag = self.inverse(x, **kwargs)
log_prob = self.base_dist.log_prob(x) + log_jac_diag.sum(-1)
return log_prob.unsqueeze(-1)
def sample(self, num_samples, latent=None, mask=None, **kwargs):
"""
Transforms samples from the base to the target distribution.
Uses reparametrization trick.
Args:
num_samples (tuple or int): Shape of samples
latent (tensor): Latent conditioning vector with shape (..., latent_dim)
Returns:
x (tensor): Samples from target distribution with shape (*num_samples, dim)
"""
if self.base_dist is None:
raise ValueError('Please define `base_dist` if you need sampling')
if isinstance(num_samples, int):
num_samples = (num_samples,)
x = self.base_dist.rsample(num_samples)
x, log_jac_diag = self.forward(x, **kwargs)
return x
apis: ["torch.zeros_like", "torch.nn.ModuleList"] | full_version: 1.8.0 | repo_name: mbilos/stribor | hexsha: 76082c255653d6bd8d506519223183e5d8395578
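Building on the docstring example in this row, a short sketch of how density evaluation and sampling compose once `base_dist` is set; `st.UnitNormal` and `st.Affine` are the names already used in that docstring.

```python
import torch
import stribor as st

torch.manual_seed(123)
dim = 2
flow = st.Flow(st.UnitNormal(dim), [st.Affine(dim)])

x = torch.rand(4, dim)
log_p = flow.log_prob(x)       # (4, 1): base log-density plus summed log-Jacobian diagonal
samples = flow.sample(4)       # (4, dim): rsample from the base pushed through the transforms

# forward followed by inverse recovers the input up to numerical error
y, _ = flow(x)
x_rec, _ = flow.inverse(y)
print(torch.allclose(x, x_rec, atol=1e-5))
```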
version: 1.8
import torch
import torch.nn as nn
import torch.nn.functional as F
def diff(x, dim=-1):
"""
Inverse of x.cumsum(dim=dim).
Compute differences between subsequent elements of the tensor.
Only works on dims -1 and -2.
Args:
x (tensor): Input of arbitrary shape
Returns:
diff (tensor): Result with the same shape as x
"""
if dim == 1:
if x.dim() == 2:
dim = -1
elif x.dim() == 3:
dim = -2
else:
raise ValueError('If dim=1, tensor must have 2 or 3 dimensions')
if dim == 2:
if x.dim() == 3:
dim = -1
elif x.dim() == 4:
dim = -2
else:
raise ValueError('If dim=2, tensor should have 3 or 4 dimensions')
if dim == -1:
return x - F.pad(x, (1, 0))[..., :-1]
elif dim == -2:
return x - F.pad(x, (0, 0, 1, 0))[..., :-1, :]
else:
raise ValueError("dim must be equal to -1 or -2")
class Cumsum(nn.Module):
"""
Compute cumulative sum along the specified dimension of the tensor.
Example:
>>> f = stribor.Cumsum(-1)
>>> f(torch.ones(1, 4))
(tensor([[1., 2., 3., 4.]]), tensor([[0., 0., 0., 0.]]))
Args:
dim (int): Tensor dimension over which to perform the summation. Options: -1 or -2.
"""
def __init__(self, dim):
super().__init__()
assert dim in [-1, -2], '`dim` must be either `-1` or `-2`'
self.dim = dim
def forward(self, x, **kwargs):
y = x.cumsum(self.dim)
return y, torch.zeros_like(y)
def inverse(self, y, **kwargs):
x = diff(y, self.dim)
return x, torch.zeros_like(x)
class Diff(nn.Module):
"""
Inverse of Cumsum transformation.
Args:
dim (int): Tensor dimension over which to perform the diff. Options: -1 or -2.
"""
def __init__(self, dim):
super().__init__()
self.base_flow = Cumsum(dim)
def forward(self, x, **kwargs):
return self.base_flow.inverse(x, **kwargs)
def inverse(self, x, **kwargs):
return self.base_flow.forward(x, **kwargs)
class CumsumColumn(nn.Module):
"""
Cumulative sum along the specific column in (..., M, N) matrix.
Example:
>>> f = stribor.CumsumColumn(1)
>>> f(torch.ones(3, 3))[0]
tensor([[1., 1., 1.],
[1., 2., 1.],
[1., 3., 1.]])
Args:
column (int): Column in the (batched) matrix (..., M, N) over which to
perform the summation
"""
def __init__(self, column):
super().__init__()
self.column = column
def forward(self, x, **kwargs):
y = x.clone()
y[..., self.column] = y[..., self.column].cumsum(-1)
return y, torch.zeros_like(y)
def inverse(self, y, **kwargs):
x = y.clone()
x[..., self.column] = diff(x[..., self.column], -1)
return x, torch.zeros_like(x)
class DiffColumn(nn.Module):
def __init__(self, column):
super().__init__()
self.base_flow = CumsumColumn(column)
def forward(self, x, **kwargs):
return self.base_flow.inverse(x, **kwargs)
def inverse(self, x, **kwargs):
return self.base_flow.forward(x, **kwargs)
apis: ["torch.zeros_like", "torch.nn.functional.pad"] | full_version: 1.8.0 | repo_name: mbilos/stribor | hexsha: 76082c255653d6bd8d506519223183e5d8395578
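A quick sketch of the round-trip these transforms rely on (assuming the classes from this row are importable): `diff` undoes `cumsum`, and both directions report a zero log-Jacobian diagonal because the map is unit-triangular and therefore volume-preserving.

```python
import torch

f = Cumsum(-1)
g = Diff(-1)

x = torch.ones(1, 4)
y, ljd = f(x)                      # tensor([[1., 2., 3., 4.]]), zero log-Jacobian
x_rec, _ = f.inverse(y)            # back to all ones
print(torch.allclose(x, x_rec), ljd)

d, _ = g(y)                        # Diff is Cumsum with forward/inverse swapped
print(torch.allclose(d, x))
```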
version: 3
import sys
import math
import os
import torch
import torchvision
import numpy as np
from pkg_resources import resource_stream
def interpolate1d(x, values, tangents):
'''
Returns:
Returns the interpolated or extrapolated values for each query point,
depending on whether or not the query lies within the span of the spline.
'''
assert torch.is_tensor(x)
assert torch.is_tensor(values)
assert torch.is_tensor(tangents)
float_dtype = x.dtype
assert values.dtype == float_dtype
assert tangents.dtype == float_dtype
assert len(values.shape) == 1
assert len(tangents.shape) == 1
assert values.shape[0] == tangents.shape[0]
x_lo = torch.floor(torch.clamp(x, torch.as_tensor(0),
values.shape[0] - 2)).type(torch.int64)
x_hi = x_lo + 1
# Compute the relative distance between each `x` and the knot below it.
t = x - x_lo.type(float_dtype)
# Compute the cubic hermite expansion of `t`.
t_sq = t**2
t_cu = t * t_sq
h01 = -2. * t_cu + 3. * t_sq
h00 = 1. - h01
h11 = t_cu - t_sq
h10 = h11 - t_sq + t
# Linearly extrapolate above and below the extents of the spline for all
# values.
value_before = tangents[0] * t + values[0]
value_after = tangents[-1] * (t - 1.) + values[-1]
# Cubically interpolate between the knots below and above each query point.
neighbor_values_lo = values[x_lo]
neighbor_values_hi = values[x_hi]
neighbor_tangents_lo = tangents[x_lo]
neighbor_tangents_hi = tangents[x_hi]
value_mid = (
neighbor_values_lo * h00 + neighbor_values_hi * h01 +
neighbor_tangents_lo * h10 + neighbor_tangents_hi * h11)
return torch.where(t < 0., value_before,
torch.where(t > 1., value_after, value_mid))
def log_safe(x):
x = torch.as_tensor(x)
return torch.log(torch.min(x, torch.tensor(33e37).to(x)))
def load_spline_params():
dirname = os.path.dirname(__file__)
with open(os.path.join(dirname, '../misc/partition_spline.npz'), "rb") as spline_file:
with np.load(spline_file, allow_pickle=False) as f:
spline_x_scale = torch.tensor(f['x_scale'])
spline_values = torch.tensor(f['values'])
spline_tangents = torch.tensor(f['tangents'])
return spline_x_scale, spline_values, spline_tangents
def get_partition_init(shape):
shape = torch.as_tensor(shape)
base1 = (2.25 * shape - 4.5) / (torch.abs(shape - 2) + 0.25) + shape + 2
base2 = 5. / 18. * log_safe(4 * shape - 15) + 8
return torch.where(shape < 4, base1, base2)
def get_partition(shape):
shape = torch.as_tensor(shape)
assert (shape >= 0).all()
init = get_partition_init(shape)
x_scale, values, tangents = load_spline_params()
return interpolate1d(init * x_scale.to(init), values.to(init), tangents.to(init))
def general_adaptive_loss(x, shape, bowl=1.):
input_shape = x.shape
shape = torch.as_tensor(shape).to(x.device)
bowl = torch.as_tensor(bowl).to(x.device)
b = x.size(0)
x = x.view(b, -1)
if len(shape.shape) == 0:
shape = shape.unsqueeze(dim=0).expand([b, ]).unsqueeze(dim=1)
else:
shape = shape.view(b, -1)
if len(bowl.shape) == 0:
bowl = bowl.unsqueeze(dim=0).expand([b, ]).unsqueeze(dim=1)
else:
bowl = bowl.view(b, -1)
partition = get_partition(shape)
ans = (torch.abs(shape - 2)/shape) * (torch.pow((torch.square(x/bowl) /
torch.abs(shape - 2) + 1), shape/2) - 1) + log_safe(bowl) + log_safe(partition)
return ans.view(input_shape)
apis: ["torch.is_tensor", "torch.square", "torch.abs", "torch.tensor", "torch.as_tensor", "torch.where"] | full_version: 3 | repo_name: jmendozais/SDSSDepth | hexsha: 7a4d0c5affef3eda7056876ccb2365ac883c08eb
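`general_adaptive_loss` needs the `partition_spline.npz` resource shipped with the repository, but `interpolate1d` is self-contained: cubic Hermite interpolation between unit-spaced knots, with linear extrapolation from the boundary tangents outside the knot range. A tiny sketch with made-up knot values, assuming `interpolate1d` from this row is in scope:

```python
import torch

# Knots at x = 0, 1, 2 (illustrative values and tangents only).
values = torch.tensor([0.0, 1.0, 0.5])
tangents = torch.tensor([1.0, 0.0, -0.5])

x = torch.tensor([-0.5, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5])
y = interpolate1d(x, values, tangents)
print(y)   # reproduces `values` at the knots; linear extrapolation outside [0, 2]
```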
version: 1.8
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import sys
import unittest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from opacus import PrivacyEngine
from opacus.distributed import DifferentiallyPrivateDistributedDataParallel as DPDDP
from torch.nn.parallel import DistributedDataParallel as DDP
PRIVACY_ALPHAS = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))
def setup_and_get_device(rank, world_size, nonce=0):
"""
Initialize the torch.distributed process group.
If you run multiple groups in parallel or if you have zombie processes, you can add a nonce to avoid errors.
"""
device = 0
if sys.platform == "win32":
# Distributed package only covers collective communications with Gloo
# backend and FileStore on Windows platform. Set init_method parameter
# in init_process_group to a local file.
# Example init_method="file:///f:/libtmp/some_file"
init_method = "file:///{your local file path}"
# initialize the process group
dist.init_process_group(
"gloo", init_method=init_method, rank=rank, world_size=world_size
)
device = rank
elif os.environ.get("SLURM_NTASKS") is not None:
# Running on a Slurm cluster
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(7440 + nonce)
local_rank = int(os.environ.get("SLURM_LOCALID"))
dist.init_process_group(backend="gloo", rank=rank, world_size=world_size)
# The device is the local rank (if you have 2 nodes with 8 GPUs each, you will have two "cuda:0" devices)
device = local_rank
else:
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
os.environ["RANK"] = str(rank)
os.environ["WORLD_SIZE"] = str(world_size)
dist.init_process_group(
init_method="env://",
backend="nccl",
)
# Single node experiment
device = rank
return device
def cleanup():
dist.destroy_process_group()
class ToyModel(nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.net1 = nn.Linear(10, 10)
self.relu = nn.ReLU()
self.net2 = nn.Linear(10, 5)
def forward(self, x):
return self.net2(self.relu(self.net1(x)))
def demo_basic(rank, world_size, weight, dp, noise_multiplier=0, max_grad_norm=1e8):
# We don't want the 2 GPUs to work on the same examples/labels in parallel
torch.manual_seed(rank)
batch_size = 32
withdp = "with" + ("out " if not dp else "")
print(f"Running basic DDP {withdp} differential privacy example on rank {rank}.")
device = setup_and_get_device(rank, world_size)
# create model and move it to GPU with id rank
model = ToyModel().to(device)
print(f"Initial weight: {model.net1.weight.data}")
# Freeze all the parameters except one, to ensure that the noise is the same
# (the DDP hook does not browse the layers in the same order as the naive implementation)
model.net1.bias.requires_grad = False
model.net2.bias.requires_grad = False
model.net2.weight.requires_grad = False
if dp:
ddp_model = DPDDP(model)
engine = PrivacyEngine(
ddp_model,
batch_size=batch_size,
sample_size=10 * batch_size,
alphas=PRIVACY_ALPHAS,
noise_multiplier=noise_multiplier,
max_grad_norm=[max_grad_norm],
)
engine.random_number_generator = engine._set_seed(0)
else:
ddp_model = DDP(model, device_ids=[device])
loss_fn = nn.MSELoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=1)
if dp:
engine.attach(optimizer)
optimizer.zero_grad()
labels = torch.randn(batch_size, 5).to(device)
outputs = ddp_model(torch.randn(batch_size, 10).to(device))
loss_fn(outputs, labels).backward()
optimizer.step()
weight.copy_(model.net1.weight.data.cpu())
cleanup()
def demo_ddp_hook(rank, world_size, weight, dp, noise_multiplier, max_grad_norm):
torch.manual_seed(rank)
batch_size = 32
withdp = "with" + ("out " if not dp else "")
print(f"Running DDP hook {withdp} differential privacy example on rank {rank}.")
device = setup_and_get_device(rank, world_size, nonce=1)
# create model and move it to GPU with id rank
model = ToyModel().to(device)
model.net1.bias.requires_grad = False
model.net2.bias.requires_grad = False
model.net2.weight.requires_grad = False
ddp_model = DDP(model, device_ids=[device])
if dp:
engine = PrivacyEngine(
ddp_model,
batch_size=batch_size,
sample_size=10 * batch_size,
alphas=PRIVACY_ALPHAS,
noise_multiplier=noise_multiplier,
max_grad_norm=[max_grad_norm],
)
engine.random_number_generator = engine._set_seed(0)
loss_fn = nn.MSELoss()
optimizer = optim.SGD(ddp_model.parameters(), lr=1)
if dp:
engine.attach(optimizer)
optimizer.zero_grad()
labels = torch.randn(batch_size, 5).to(device)
outputs = ddp_model(torch.randn(batch_size, 10).to(device))
loss_fn(outputs, labels).backward()
optimizer.step()
weight.copy_(model.net1.weight.data.cpu())
del ddp_model
cleanup()
def add_remove_ddp_hooks(
rank, world_size, remaining_hooks, dp, noise_multiplier=0, max_grad_norm=1e8
):
device = setup_and_get_device(rank, world_size, nonce=2)
model = ToyModel().to(device)
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=[device])
engine = PrivacyEngine(
ddp_model,
batch_size=1,
sample_size=10,
alphas=PRIVACY_ALPHAS,
noise_multiplier=noise_multiplier,
max_grad_norm=[max_grad_norm],
)
optimizer = optim.SGD(ddp_model.parameters(), lr=1)
engine.attach(optimizer)
remaining_hooks["attached"] = {
p: p._backward_hooks for p in engine.module.parameters() if p._backward_hooks
}
engine.detach()
remaining_hooks["detached"] = {
p: p._backward_hooks for p in engine.module.parameters() if p._backward_hooks
}
cleanup()
def debug(rank, world_size, tensor, dp, noise_multiplier=0, max_grad_norm=1e8):
local_rank = setup_and_get_device(rank, world_size)
print(f"Rank: {rank},World size: {world_size}, local_rank: {local_rank}")
tensor = tensor.to(local_rank)
print(f"dp: {dp}")
print(tensor)
cleanup()
def run_function(local_function, tensor, dp, noise_multiplier=0, max_grad_norm=1e8):
if os.environ.get("SLURM_NTASKS") is not None:
world_size = int(os.environ.get("SLURM_NTASKS"))
rank = int(os.environ.get("SLURM_PROCID"))
print(f"Running on a Slurm cluster with {world_size} tasks.")
local_function(rank, world_size, tensor, dp, noise_multiplier, max_grad_norm)
else:
world_size = torch.cuda.device_count()
print(f"Spawning multiple processes on a local machine with {world_size} GPUs")
# The rank will be passed as the first argument
mp.spawn(
local_function,
args=(
world_size,
tensor,
dp,
noise_multiplier,
max_grad_norm,
),
nprocs=world_size,
join=True,
)
return world_size
class GradientComputationTest(unittest.TestCase):
def test_connection(self):
tensor = torch.zeros(10, 10)
world_size = run_function(debug, tensor, dp=True)
self.assertTrue(
world_size >= 2, f"Need at least 2 gpus but was provided only {world_size}."
)
def test_gradient_noclip_zeronoise(self):
# Tests that gradient is the same with DP or with DDP
weight_dp, weight_nodp = torch.zeros(10, 10), torch.zeros(10, 10)
run_function(demo_basic, weight_dp, dp=True)
run_function(demo_basic, weight_nodp, dp=False)
self.assertTrue(torch.norm(weight_dp - weight_nodp) < 1e-7)
def test_ddp_hook(self):
# Tests that the DDP hook does the same thing as naive aggregation with per layer clipping
weight_ddp_naive, weight_ddp_hook = torch.zeros(10, 10), torch.zeros(10, 10)
run_function(
demo_basic,
weight_ddp_naive,
dp=True,
noise_multiplier=0.1,
max_grad_norm=1.0,
)
run_function(
demo_ddp_hook,
weight_ddp_hook,
dp=True,
noise_multiplier=0.1,
max_grad_norm=1.0,
)
self.assertTrue(
torch.norm(weight_ddp_naive - weight_ddp_hook) < 1e-7,
f"DDP naive: {weight_ddp_naive}\nDDP hook: {weight_ddp_hook}",
)
def test_add_remove_ddp_hooks(self):
remaining_hooks = {
"attached": None,
"detached": None,
}
run_function(
add_remove_ddp_hooks,
remaining_hooks,
dp=True,
noise_multiplier=0.1,
max_grad_norm=1.0,
)
assert remaining_hooks["attached"], "There are no hooks."
assert not remaining_hooks[
"detached"
], f"Some hooks remain after .remove_hooks(): {remaining_hooks}"
apis: ["torch.nn.Linear", "torch.zeros", "torch.nn.MSELoss", "torch.distributed.destroy_process_group", "torch.distributed.init_process_group", "torch.norm", "torch.multiprocessing.spawn", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.device_count", "torch.manual_seed", "torch.nn.ReLU", "torch.randn"] | full_version: 1.8 | repo_name: RQuispeC/opacus | hexsha: 5c83d59fc169e93667946204f7a6859827a38ace
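All of the distributed tests above follow the same attach pattern of this (pre-1.0) Opacus API: construct the model, create a `PrivacyEngine` with batch/sample sizes, alphas, noise multiplier and clipping norm, then `attach` it to the optimizer and train normally. A single-process sketch of that pattern, reusing the constructor arguments shown in the row (shapes and hyperparameters are illustrative):

```python
import torch
import torch.nn as nn
import torch.optim as optim
from opacus import PrivacyEngine

model = nn.Sequential(nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 5))
optimizer = optim.SGD(model.parameters(), lr=1.0)

batch_size = 32
engine = PrivacyEngine(
    model,
    batch_size=batch_size,
    sample_size=10 * batch_size,
    alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
    noise_multiplier=0.1,
    max_grad_norm=1.0,
)
engine.attach(optimizer)          # hooks per-sample clipping and noise into optimizer.step()

x, labels = torch.randn(batch_size, 10), torch.randn(batch_size, 5)
optimizer.zero_grad()
nn.MSELoss()(model(x), labels).backward()
optimizer.step()
```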
version: 1.4
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import argparse
import sklearn.metrics as metrics
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn.functional as F
import MinkowskiEngine as ME
from examples.pointnet import (
PointNet,
MinkowskiPointNet,
CoordinateTransformation,
ModelNet40H5,
stack_collate_fn,
minkowski_collate_fn,
)
from examples.common import seed_all
parser = argparse.ArgumentParser()
parser.add_argument("--voxel_size", type=float, default=0.05)
parser.add_argument("--max_steps", type=int, default=100000)
parser.add_argument("--val_freq", type=int, default=1000)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--lr", default=1e-1, type=float)
parser.add_argument("--weight_decay", type=float, default=1e-4)
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--stat_freq", type=int, default=100)
parser.add_argument("--weights", type=str, default="modelnet.pth")
parser.add_argument("--seed", type=int, default=777)
parser.add_argument("--translation", type=float, default=0.2)
parser.add_argument("--test_translation", type=float, default=0.0)
parser.add_argument(
"--network",
type=str,
choices=["pointnet", "minkpointnet", "minkfcnn", "minksplatfcnn"],
default="minkfcnn",
)
class MinkowskiFCNN(ME.MinkowskiNetwork):
def __init__(
self,
in_channel,
out_channel,
embedding_channel=1024,
channels=(32, 48, 64, 96, 128),
D=3,
):
ME.MinkowskiNetwork.__init__(self, D)
self.network_initialization(
in_channel,
out_channel,
channels=channels,
embedding_channel=embedding_channel,
kernel_size=3,
D=D,
)
self.weight_initialization()
def get_mlp_block(self, in_channel, out_channel):
return nn.Sequential(
ME.MinkowskiLinear(in_channel, out_channel, bias=False),
ME.MinkowskiBatchNorm(out_channel),
ME.MinkowskiLeakyReLU(),
)
def get_conv_block(self, in_channel, out_channel, kernel_size, stride):
return nn.Sequential(
ME.MinkowskiConvolution(
in_channel,
out_channel,
kernel_size=kernel_size,
stride=stride,
dimension=self.D,
),
ME.MinkowskiBatchNorm(out_channel),
ME.MinkowskiLeakyReLU(),
)
def network_initialization(
self,
in_channel,
out_channel,
channels,
embedding_channel,
kernel_size,
D=3,
):
self.mlp1 = self.get_mlp_block(in_channel, channels[0])
self.conv1 = self.get_conv_block(
channels[0],
channels[1],
kernel_size=kernel_size,
stride=1,
)
self.conv2 = self.get_conv_block(
channels[1],
channels[2],
kernel_size=kernel_size,
stride=2,
)
self.conv3 = self.get_conv_block(
channels[2],
channels[3],
kernel_size=kernel_size,
stride=2,
)
self.conv4 = self.get_conv_block(
channels[3],
channels[4],
kernel_size=kernel_size,
stride=2,
)
self.conv5 = nn.Sequential(
self.get_conv_block(
channels[1] + channels[2] + channels[3] + channels[4],
embedding_channel // 4,
kernel_size=3,
stride=2,
),
self.get_conv_block(
embedding_channel // 4,
embedding_channel // 2,
kernel_size=3,
stride=2,
),
self.get_conv_block(
embedding_channel // 2,
embedding_channel,
kernel_size=3,
stride=2,
),
)
self.pool = ME.MinkowskiMaxPooling(kernel_size=3, stride=2, dimension=D)
self.global_max_pool = ME.MinkowskiGlobalMaxPooling()
self.global_avg_pool = ME.MinkowskiGlobalAvgPooling()
self.final = nn.Sequential(
self.get_mlp_block(embedding_channel * 2, 512),
ME.MinkowskiDropout(),
self.get_mlp_block(512, 512),
ME.MinkowskiLinear(512, out_channel, bias=True),
)
# No, Dropout, last 256 linear, AVG_POOLING 92%
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiConvolution):
ME.utils.kaiming_normal_(m.kernel, mode="fan_out", nonlinearity="relu")
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def forward(self, x: ME.TensorField):
x = self.mlp1(x)
y = x.sparse()
y = self.conv1(y)
y1 = self.pool(y)
y = self.conv2(y1)
y2 = self.pool(y)
y = self.conv3(y2)
y3 = self.pool(y)
y = self.conv4(y3)
y4 = self.pool(y)
x1 = y1.slice(x)
x2 = y2.slice(x)
x3 = y3.slice(x)
x4 = y4.slice(x)
x = ME.cat(x1, x2, x3, x4)
y = self.conv5(x.sparse())
x1 = self.global_max_pool(y)
x2 = self.global_avg_pool(y)
return self.final(ME.cat(x1, x2)).F
class GlobalMaxAvgPool(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
self.global_max_pool = ME.MinkowskiGlobalMaxPooling()
self.global_avg_pool = ME.MinkowskiGlobalAvgPooling()
def forward(self, tensor):
x = self.global_max_pool(tensor)
y = self.global_avg_pool(tensor)
return ME.cat(x, y)
class MinkowskiSplatFCNN(MinkowskiFCNN):
def __init__(
self,
in_channel,
out_channel,
embedding_channel=1024,
channels=(32, 48, 64, 96, 128),
D=3,
):
MinkowskiFCNN.__init__(
self, in_channel, out_channel, embedding_channel, channels, D
)
def forward(self, x: ME.TensorField):
x = self.mlp1(x)
y = x.splat()
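        # splat() rasterizes the field features onto the sparse voxel grid (in contrast to the
        # one-to-one quantization of sparse()); interpolate() below maps features back to the
        # original point coordinates.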
y = self.conv1(y)
y1 = self.pool(y)
y = self.conv2(y1)
y2 = self.pool(y)
y = self.conv3(y2)
y3 = self.pool(y)
y = self.conv4(y3)
y4 = self.pool(y)
x1 = y1.interpolate(x)
x2 = y2.interpolate(x)
x3 = y3.interpolate(x)
x4 = y4.interpolate(x)
x = ME.cat(x1, x2, x3, x4)
y = self.conv5(x.sparse())
x1 = self.global_max_pool(y)
x2 = self.global_avg_pool(y)
return self.final(ME.cat(x1, x2)).F
STR2NETWORK = dict(
pointnet=PointNet,
minkpointnet=MinkowskiPointNet,
minkfcnn=MinkowskiFCNN,
minksplatfcnn=MinkowskiSplatFCNN,
)
def create_input_batch(batch, is_minknet, device="cuda", quantization_size=0.05):
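    # Minkowski networks consume a quantized TensorField; dense point networks take a
    # (batch, 3, num_points) tensor instead.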
if is_minknet:
batch["coordinates"][:, 1:] = batch["coordinates"][:, 1:] / quantization_size
return ME.TensorField(
coordinates=batch["coordinates"],
features=batch["features"],
device=device,
)
else:
return batch["coordinates"].permute(0, 2, 1).to(device)
class CoordinateTranslation:
def __init__(self, translation):
self.trans = translation
def __call__(self, coords):
if self.trans > 0:
coords += np.random.uniform(low=-self.trans, high=self.trans, size=[1, 3])
return coords
def make_data_loader(phase, is_minknet, config):
assert phase in ["train", "val", "test"]
is_train = phase == "train"
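    # Random coordinate jitter is applied only during training; evaluation uses the fixed
    # (usually zero) test translation.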
dataset = ModelNet40H5(
phase=phase,
transform=CoordinateTransformation(trans=config.translation)
if is_train
else CoordinateTranslation(config.test_translation),
data_root="modelnet40_ply_hdf5_2048",
)
return DataLoader(
dataset,
num_workers=config.num_workers,
shuffle=is_train,
collate_fn=minkowski_collate_fn if is_minknet else stack_collate_fn,
batch_size=config.batch_size,
)
def test(net, device, config, phase="val"):
is_minknet = isinstance(net, ME.MinkowskiNetwork)
data_loader = make_data_loader(
"test",
is_minknet,
config=config,
)
net.eval()
labels, preds = [], []
with torch.no_grad():
for batch in data_loader:
input = create_input_batch(
batch,
is_minknet,
device=device,
quantization_size=config.voxel_size,
)
logit = net(input)
pred = torch.argmax(logit, 1)
labels.append(batch["labels"].cpu().numpy())
preds.append(pred.cpu().numpy())
torch.cuda.empty_cache()
return metrics.accuracy_score(np.concatenate(labels), np.concatenate(preds))
def criterion(pred, labels, smoothing=True):
"""Calculate cross entropy loss, apply label smoothing if needed."""
labels = labels.contiguous().view(-1)
if smoothing:
eps = 0.2
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, labels.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
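        # Label smoothing: the true class keeps 1 - eps of the probability mass and the
        # remaining eps is spread uniformly over the other n_class - 1 classes.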
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(pred, labels, reduction="mean")
return loss
def train(net, device, config):
is_minknet = isinstance(net, ME.MinkowskiNetwork)
optimizer = optim.SGD(
net.parameters(),
lr=config.lr,
momentum=0.9,
weight_decay=config.weight_decay,
)
scheduler = optim.lr_scheduler.CosineAnnealingLR(
optimizer,
T_max=config.max_steps,
)
print(optimizer)
print(scheduler)
train_iter = iter(make_data_loader("train", is_minknet, config))
best_metric = 0
net.train()
for i in range(config.max_steps):
optimizer.zero_grad()
try:
            data_dict = next(train_iter)
except StopIteration:
train_iter = iter(make_data_loader("train", is_minknet, config))
            data_dict = next(train_iter)
input = create_input_batch(
data_dict, is_minknet, device=device, quantization_size=config.voxel_size
)
logit = net(input)
loss = criterion(logit, data_dict["labels"].to(device))
loss.backward()
optimizer.step()
scheduler.step()
torch.cuda.empty_cache()
if i % config.stat_freq == 0:
print(f"Iter: {i}, Loss: {loss.item():.3e}")
if i % config.val_freq == 0 and i > 0:
torch.save(
{
"state_dict": net.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"curr_iter": i,
},
config.weights,
)
accuracy = test(net, device, config, phase="val")
if best_metric < accuracy:
best_metric = accuracy
print(f"Validation accuracy: {accuracy}. Best accuracy: {best_metric}")
net.train()
if __name__ == "__main__":
config = parser.parse_args()
seed_all(config.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("===================ModelNet40 Dataset===================")
print(f"Training with translation {config.translation}")
print(f"Evaluating with translation {config.test_translation}")
print("=============================================\n\n")
net = STR2NETWORK[config.network](
in_channel=3, out_channel=40, embedding_channel=1024
).to(device)
print("===================Network===================")
print(net)
print("=============================================\n\n")
train(net, device, config)
accuracy = test(net, device, config, phase="test")
print(f"Test accuracy: {accuracy}")
| [
"torch.nn.init.constant_",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.no_grad",
"torch.nn.Module.__init__",
"torch.nn.functional.log_softmax",
"torch.nn.functional.cross_entropy",
"torch.cuda.empty_cache",
"torch.utils.data.DataLoader",
"torch.cuda.is_available",
"torch.zeros_like",
"torch.argmax"
] | 1.4 | NNstorm/MinkowskiEngine | 443b37a58c379b2482b5d160d9e874b356b4bf2f |
1.4 | # Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import unittest
import numpy as np
import torch
from MinkowskiEngine import (
SparseTensor,
SparseTensorOperationMode,
SparseTensorQuantizationMode,
set_sparse_tensor_operation_mode,
clear_global_coordinate_manager,
is_cuda_available,
)
from MinkowskiEngine.utils import batched_coordinates, sparse_quantize, sparse_collate
from tests.python.common import data_loader, load_file
class SparseTensorTestCase(unittest.TestCase):
def test(self):
print(f"{self.__class__.__name__}: test SparseTensor")
coords, feats, labels = data_loader(nchannel=2)
input = SparseTensor(feats, coordinates=coords)
print(input)
def test_empty(self):
print(f"{self.__class__.__name__}: test_empty SparseTensor")
feats = torch.FloatTensor(0, 16)
coords = torch.IntTensor(0, 4)
input = SparseTensor(feats, coordinates=coords)
print(input)
def test_tensor_stride(self):
print(f"{self.__class__.__name__}: test_tensor_stride SparseTensor")
feats = torch.FloatTensor(4, 16)
coords = torch.IntTensor(
[[0, 4, 2, 1], [0, 4, 0, 0], [0, 4, 4, 4], [0, 4, 4, 7]]
)
print(coords)
input = SparseTensor(feats, coordinates=coords, tensor_stride=4)
self.assertEqual(input.tensor_stride, [4, 4, 4])
print(input)
def test_force_creation(self):
print(f"{self.__class__.__name__}: test_force_creation")
coords, feats, labels = data_loader(nchannel=2)
input1 = SparseTensor(feats, coordinates=coords)
input2 = SparseTensor(
feats, coordinates=coords, coordinate_manager=input1.coordinate_manager
)
print(input1.coordinate_map_key, input2.coordinate_map_key)
def test_device(self):
print(f"{self.__class__.__name__}: test_device SparseTensor")
if not is_cuda_available():
return
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
SparseTensor(feats.to(0), coords.to(0))
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T.to(0)
st = SparseTensor(feats, coords, device=feats.device)
print(st)
def test_device_unique(self):
print(f"{self.__class__.__name__}: test_device_unique SparseTensor")
if not is_cuda_available():
return
coords = torch.IntTensor(
[[0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1], [1, 2]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
SparseTensor(feats.to(0), coords.to(0))
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T.to(0)
st = SparseTensor(feats, coords, device=feats.device)
print(st)
def test_device2(self):
print(f"{self.__class__.__name__}: test_device2 SparseTensor")
if not is_cuda_available():
return
        coordinates = np.random.rand(8192, 3) * 200
quant_coordinates, quant_features = sparse_quantize(coordinates, coordinates)
bcoords, bfeats = sparse_collate([quant_coordinates], [quant_features])
bcoords, bfeats = bcoords.cuda(), bfeats.cuda()
print(bcoords, bfeats)
SparseTensor(bfeats, bcoords)
def test_quantization(self):
print(f"{self.__class__.__name__}: test_quantization")
coords, feats, labels = data_loader(nchannel=2)
# create duplicate coords
coords[0] = coords[1]
coords[2] = coords[3]
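        # Duplicate coordinates are collapsed into a single row by the default quantization
        # mode, so two rows are dropped below.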
input = SparseTensor(feats, coordinates=coords)
self.assertTrue(len(input) == len(coords) - 2)
input = SparseTensor(
feats,
coordinates=coords,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
)
self.assertTrue(len(coords) == 16)
self.assertTrue(len(input) == 14)
# 1D
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
# 0.5, 2.5, 5.5, 7
sinput = SparseTensor(
coordinates=coords,
features=feats,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
)
self.assertTrue(len(sinput) == 4)
self.assertTrue(0.5 in sinput.features)
self.assertTrue(2.5 in sinput.features)
self.assertTrue(5.5 in sinput.features)
self.assertTrue(7 in sinput.features)
self.assertTrue(len(sinput.slice(sinput)) == len(coords))
def test_quantization_gpu(self):
print(f"{self.__class__.__name__}: test_quantization_gpu")
coords, feats, labels = data_loader(nchannel=2)
# create duplicate coords
coords[0] = coords[1]
coords[2] = coords[3]
input = SparseTensor(feats, coordinates=coords)
self.assertTrue(len(input) == len(coords) - 2)
input = SparseTensor(
feats,
coordinates=coords,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
device="cuda",
)
self.assertTrue(len(coords) == 16)
self.assertTrue(len(input) == 14)
print(input)
# 1D
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
# 0.5, 2.5, 5.5, 7
sinput = SparseTensor(
coordinates=coords,
features=feats,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
device="cuda",
)
print(sinput)
self.assertTrue(len(sinput) == 4)
self.assertTrue(0.5 in sinput.features)
self.assertTrue(2.5 in sinput.features)
self.assertTrue(5.5 in sinput.features)
self.assertTrue(7 in sinput.features)
self.assertTrue(len(sinput.slice(sinput)) == len(coords))
def test_extraction(self):
print(f"{self.__class__.__name__}: test_extraction")
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(feats, coords)
C0 = X.coordinates_at(0)
F0 = X.features_at(0)
self.assertTrue(0 in C0)
self.assertTrue(1 in C0)
self.assertTrue(2 in C0)
self.assertTrue(1.1 in F0)
self.assertTrue(2.1 in F0)
self.assertTrue(3.1 in F0)
CC0, FC0 = X.coordinates_and_features_at(0)
self.assertTrue((C0 == CC0).all())
self.assertTrue((F0 == FC0).all())
coords, feats = X.decomposed_coordinates_and_features
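        # decomposed_* returns one tensor per batch index; batch index 1 has no points here.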
for c, f in zip(coords, feats):
self.assertEqual(c.numel(), f.numel())
print(c, f)
self.assertEqual(len(coords[0]), 3)
self.assertEqual(len(coords[1]), 0)
self.assertEqual(len(coords[2]), 2)
if not is_cuda_available():
return
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(feats, coords, device=0)
coords, feats = X.decomposed_coordinates_and_features
for c, f in zip(coords, feats):
self.assertEqual(c.numel(), f.numel())
print(c, f)
self.assertEqual(len(coords[0]), 3)
self.assertEqual(len(coords[1]), 0)
self.assertEqual(len(coords[2]), 2)
def test_features_at_coordinates(self):
print(f"{self.__class__.__name__}: test_features_at_coordinates")
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(features=feats, coordinates=coords)
feats = X.features_at_coordinates(
torch.FloatTensor([[0, 0], [0, 1], [0, 2], [2, 2], [0, 0], [0, 0.5]])
).flatten()
self.assertTrue(feats[0] == 1.1)
self.assertTrue(feats[3] == 5.1)
self.assertTrue(feats[4] == 1.1)
def test_decomposition(self):
print(f"{self.__class__.__name__}: test_decomposition")
coords, colors, pcd = load_file("1.ply")
colors = torch.from_numpy(colors)
for batch_size in [1, 5, 10, 20, 40]:
for voxel_size in [0.02]:
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
feats = torch.cat([colors for b in range(batch_size)], 0)
sinput = SparseTensor(feats, bcoords)
(
decomposed_coords,
decomposed_feats,
) = sinput.decomposed_coordinates_and_features
print([len(c) for c in decomposed_coords])
print([len(f) for f in decomposed_feats])
self.assertEqual(len(decomposed_coords), batch_size)
self.assertEqual(len(decomposed_feats), batch_size)
def test_decomposition_gpu(self):
print(f"{self.__class__.__name__}: test_decomposition_gpu")
if not torch.cuda.is_available():
return
coords, colors, pcd = load_file("1.ply")
colors = torch.from_numpy(colors)
for batch_size in [5, 10, 20, 40]:
for voxel_size in [0.02]:
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
feats = torch.cat([colors for b in range(batch_size)], 0)
sinput = SparseTensor(feats.to(0), bcoords.to(0))
(
decomposed_coords,
decomposed_feats,
) = sinput.decomposed_coordinates_and_features
print([len(c) for c in decomposed_coords])
print([len(f) for f in decomposed_feats])
self.assertEqual(len(decomposed_coords), batch_size)
self.assertEqual(len(decomposed_feats), batch_size)
def test_operation_mode(self):
print(f"{self.__class__.__name__}: test_operation_mode")
# Set to use the global sparse tensor coords manager by default
set_sparse_tensor_operation_mode(
SparseTensorOperationMode.SHARE_COORDINATE_MANAGER
)
coords, feats, labels = data_loader(nchannel=2)
# Create a sparse tensor on two different coordinates.
A = SparseTensor(torch.rand(feats.shape), coordinates=coords)
B = SparseTensor(
torch.rand(4, 2),
coordinates=torch.IntTensor([[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]),
)
self.assertTrue(A.coordinate_manager == B.coordinate_manager)
A.requires_grad_(True)
B.requires_grad_(True)
C = A + B
C.F.sum().backward()
self.assertTrue(torch.all(A.F.grad == 1).item())
self.assertTrue(torch.all(B.F.grad == 1).item())
C = A - B
C = A * B
C = A / B
# Inplace
A.requires_grad_(False)
D = SparseTensor(
torch.rand(feats.shape),
coordinate_map_key=A.coordinate_map_key,
coordinate_manager=A.coordinate_manager,
)
A -= D
A *= D
A /= D
clear_global_coordinate_manager()
set_sparse_tensor_operation_mode(
SparseTensorOperationMode.SEPARATE_COORDINATE_MANAGER
)
| [
"torch.rand",
"torch.IntTensor",
"torch.FloatTensor",
"torch.from_numpy",
"torch.all",
"torch.cuda.is_available"
] | 1.4 | NNstorm/MinkowskiEngine | 443b37a58c379b2482b5d160d9e874b356b4bf2f |
1.7 | # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
"""
COCO dataset which returns image_id for evaluation.
Mostly copy-paste from https://github.com/ashkamath/mdetr/blob/main/datasets/gqa.py
"""
import json
from pathlib import Path
import torch
import torchvision
from transformers import RobertaTokenizerFast
from .coco import ConvertCocoPolysToMask, ModulatedDetection, make_coco_transforms
class VQAv2Detection(ModulatedDetection):
pass
class VQAv2QuestionAnswering(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms, return_masks, return_tokens, tokenizer, ann_folder):
super(VQAv2QuestionAnswering, self).__init__(img_folder, ann_file)
self._transforms = transforms
self.prepare = ConvertCocoPolysToMask(return_masks, return_tokens, tokenizer=tokenizer)
with open(ann_folder / "vqa2_answer2id.json", "r") as f:
self.answer2id = json.load(f)
with open(ann_folder / "vqa2_answer2id_by_type.json", "r") as f:
self.answer2id_by_type = json.load(f)
self.type2id = {"yes/no": 0, "number": 1, "other": 2}
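        # Answers are mapped to a global id and to per-type ids; per-type targets are later set
        # to -100 (ignored) when the question belongs to a different answer type.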
def __getitem__(self, idx):
img, target = super(VQAv2QuestionAnswering, self).__getitem__(idx)
image_id = self.ids[idx]
coco_img = self.coco.loadImgs(image_id)[0]
caption = coco_img["caption"]
dataset_name = coco_img["dataset_name"]
questionId = coco_img["questionId"]
target = {"image_id": image_id, "annotations": target, "caption": caption}
img, target = self.prepare(img, target)
if self._transforms is not None:
img, target = self._transforms(img, target)
target["dataset_name"] = dataset_name
target["questionId"] = questionId
if coco_img["answer"] not in self.answer2id:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer"] = torch.as_tensor(self.answer2id[answer], dtype=torch.long)
target["answer_type"] = torch.as_tensor(self.type2id[coco_img["answer_type"]], dtype=torch.long)
        # util.misc.collate_fn requires the 'answer' key to be set before each per-type answer key in target
if coco_img["answer"] not in self.answer2id_by_type["yes/no"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_yes/no"] = torch.as_tensor(
self.answer2id_by_type["yes/no"][answer] if coco_img["answer_type"] == "yes/no" else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["number"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_number"] = torch.as_tensor(
self.answer2id_by_type["number"][answer] if coco_img["answer_type"] == "number" else -100,
dtype=torch.long,
)
if coco_img["answer"] not in self.answer2id_by_type["other"]:
answer = "unknown"
else:
answer = coco_img["answer"]
target["answer_other"] = torch.as_tensor(
self.answer2id_by_type["other"][answer] if coco_img["answer_type"] == "other" else -100,
dtype=torch.long,
)
return img, target
def build(image_set, args):
# TODO: img or all?
img_dir = Path(args.coco_img_path)
assert img_dir.exists(), f"provided COCO img path {img_dir} does not exist"
tokenizer = RobertaTokenizerFast.from_pretrained(args.text_encoder_type)
if args.do_qa:
        # Not needed for VQAv2:
# assert args.vqa2_split_type is not None
if image_set == "train":
datasets = []
for imset in ["train", "minival"]:
ann_file = Path(args.vqa2_ann_path) / f"finetune_vqa2_{imset}.json"
datasets.append(
VQAv2QuestionAnswering(
img_dir / "train2014" if imset == "train" else img_dir / "val2014",
ann_file,
transforms=make_coco_transforms(image_set, cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
)
return torch.utils.data.ConcatDataset(datasets)
elif image_set == "val":
            # TODO: confirm that this is the correct ann_file
            ann_file = Path(args.vqa2_ann_path) / "finetune_vqa2_minival.json"
return VQAv2QuestionAnswering(
img_dir / "val2014",
ann_file,
transforms=make_coco_transforms(image_set, cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
elif image_set in ["test", "testdev", "trainval"]:
ann_file = Path(args.vqa2_ann_path) / f"finetune_vqa2_{image_set}.json"
return VQAv2QuestionAnswering(
img_dir / "test2015",
ann_file,
transforms=make_coco_transforms("val", cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
ann_folder=Path(args.vqa2_ann_path),
)
else:
assert False, f"Unknown image set {image_set}"
| [
"torch.utils.data.ConcatDataset",
"torch.as_tensor"
] | 1.7.0 | TopCoder2K/mdetr | aedfd63f550ae36d1477484c489a2aa438d10aa3 |
1.6 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nnunet.training.loss_functions.TopK_loss import TopKLoss
from nnunet.training.loss_functions.crossentropy import RobustCrossEntropyLoss
from nnunet.utilities.nd_softmax import softmax_helper
from nnunet.utilities.tensor_utilities import sum_tensor
from torch import nn
import numpy as np
class GDL(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False, square_volumes=False):
"""
square_volumes will square the weight term. The paper recommends square_volumes=True; I don't (just an intuition)
"""
super(GDL, self).__init__()
self.square_volumes = square_volumes
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
shp_y = y.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if len(shp_x) != len(shp_y):
y = y.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(x.shape, y.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = y
else:
gt = y.long()
y_onehot = torch.zeros(shp_x)
if x.device.type == "cuda":
y_onehot = y_onehot.cuda(x.device.index)
y_onehot.scatter_(1, gt, 1)
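            # scatter_ turns the integer label map into a one-hot encoding along the channel axis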
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
if not self.do_bg:
x = x[:, 1:]
y_onehot = y_onehot[:, 1:]
tp, fp, fn, _ = get_tp_fp_fn_tn(x, y_onehot, axes, loss_mask, self.square)
# GDL weight computation, we use 1/V
volumes = sum_tensor(y_onehot, axes) + 1e-6 # add some eps to prevent div by zero
if self.square_volumes:
volumes = volumes ** 2
# apply weights
tp = tp / volumes
fp = fp / volumes
fn = fn / volumes
# sum over classes
if self.batch_dice:
axis = 0
else:
axis = 1
tp = tp.sum(axis, keepdim=False)
fp = fp.sum(axis, keepdim=False)
fn = fn.sum(axis, keepdim=False)
# compute dice
dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth)
dc = dc.mean()
return -dc
def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):
"""
    net_output must be (b, c, x, y(, z))
    gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
    if mask is provided it must have shape (b, 1, x, y(, z))
    :param net_output:
    :param gt:
    :param axes: can be an empty tuple () for no summation
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
tn = (1 - net_output) * (1 - y_onehot)
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tn = tn ** 2
if len(axes) > 0:
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
tn = sum_tensor(tn, axes, keepdim=False)
return tp, fp, fn, tn
class SoftDiceLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):
"""
"""
super(SoftDiceLoss, self).__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False)
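        # Soft Dice per class: (2*TP + smooth) / (2*TP + FP + FN + smooth); the smooth term
        # avoids division by zero for classes absent from the batch.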
nominator = 2 * tp + self.smooth
denominator = 2 * tp + fp + fn + self.smooth
dc = nominator / (denominator + 1e-8)
if not self.do_bg:
if self.batch_dice:
dc = dc[1:]
else:
dc = dc[:, 1:]
dc = dc.mean()
return -dc
class MCCLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_mcc=False, do_bg=True, smooth=0.0):
"""
        Based on the Matthews correlation coefficient
        https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
        Note: this loss is very unstable in practice and does not work well.
"""
super(MCCLoss, self).__init__()
self.smooth = smooth
self.do_bg = do_bg
self.batch_mcc = batch_mcc
self.apply_nonlin = apply_nonlin
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
voxels = np.prod(shp_x[2:])
if self.batch_mcc:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn, tn = get_tp_fp_fn_tn(x, y, axes, loss_mask, False)
tp /= voxels
fp /= voxels
fn /= voxels
tn /= voxels
nominator = tp * tn - fp * fn + self.smooth
denominator = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5 + self.smooth
mcc = nominator / denominator
if not self.do_bg:
if self.batch_mcc:
mcc = mcc[1:]
else:
mcc = mcc[:, 1:]
mcc = mcc.mean()
return -mcc
class SoftDiceLossSquared(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.):
"""
squares the terms in the denominator as proposed by Milletari et al.
"""
super(SoftDiceLossSquared, self).__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
shp_y = y.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
with torch.no_grad():
if len(shp_x) != len(shp_y):
y = y.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(x.shape, y.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = y
else:
y = y.long()
y_onehot = torch.zeros(shp_x)
if x.device.type == "cuda":
y_onehot = y_onehot.cuda(x.device.index)
y_onehot.scatter_(1, y, 1).float()
intersect = x * y_onehot
# values in the denominator get smoothed
denominator = x ** 2 + y_onehot ** 2
# aggregation was previously done in get_tp_fp_fn, but needs to be done here now (needs to be done after
# squaring)
intersect = sum_tensor(intersect, axes, False) + self.smooth
denominator = sum_tensor(denominator, axes, False) + self.smooth
dc = 2 * intersect / denominator
if not self.do_bg:
if self.batch_dice:
dc = dc[1:]
else:
dc = dc[:, 1:]
dc = dc.mean()
return -dc
class DC_and_CE_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1,
log_dice=False, ignore_label=None):
"""
CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want.
:param soft_dice_kwargs:
:param ce_kwargs:
:param aggregate:
:param square_dice:
:param weight_ce:
:param weight_dice:
"""
super(DC_and_CE_loss, self).__init__()
if ignore_label is not None:
assert not square_dice, 'not implemented'
ce_kwargs['reduction'] = 'none'
self.log_dice = log_dice
self.weight_dice = weight_dice
self.weight_ce = weight_ce
self.aggregate = aggregate
self.ce = RobustCrossEntropyLoss(**ce_kwargs)
self.ignore_label = ignore_label
if not square_dice:
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
else:
self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
"""
target must be b, c, x, y(, z) with c=1
:param net_output:
:param target:
:return:
"""
if self.ignore_label is not None:
assert target.shape[1] == 1, 'not implemented for one hot encoding'
mask = target != self.ignore_label
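            # voxels equal to ignore_label are zeroed in the target and excluded from both the
            # Dice and CE terms through this mask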
target[~mask] = 0
mask = mask.float()
else:
mask = None
dc_loss = self.dc(net_output, target, loss_mask=mask) if self.weight_dice != 0 else 0
if self.log_dice:
dc_loss = -torch.log(-dc_loss)
ce_loss = self.ce(net_output, target[:, 0].long()) if self.weight_ce != 0 else 0
if self.ignore_label is not None:
ce_loss *= mask[:, 0]
ce_loss = ce_loss.sum() / mask.sum()
if self.aggregate == "sum":
result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
class ATM_and_DC_and_CE_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False, weight_ce=1, weight_dice=1, weight_atm=0.5,
log_dice=False, ignore_label=None):
"""
CAREFUL. Weights for CE and Dice do not need to sum to one. You can set whatever you want.
:param soft_dice_kwargs:
:param ce_kwargs:
:param aggregate:
:param square_dice:
:param weight_ce:
:param weight_dice:
"""
super(ATM_and_DC_and_CE_loss, self).__init__()
if ignore_label is not None:
assert not square_dice, 'not implemented'
ce_kwargs['reduction'] = 'none'
self.log_dice = log_dice
self.weight_dice = weight_dice
self.weight_ce = weight_ce
self.aggregate = aggregate
self.ce = RobustCrossEntropyLoss(**ce_kwargs)
self.atm = ATM(apply_nonlin=softmax_helper, weight_atm=weight_atm)
self.ignore_label = ignore_label
if not square_dice:
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
else:
self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
"""
target must be b, c, x, y(, z) with c=1
:param net_output:
:param target:
:return:
"""
if self.ignore_label is not None:
assert target.shape[1] == 1, 'not implemented for one hot encoding'
mask = target != self.ignore_label
target[~mask] = 0
mask = mask.float()
else:
mask = None
net_output = net_output * self.atm(net_output, target)
dc_loss = self.dc(net_output, target, loss_mask=mask) if self.weight_dice != 0 else 0
if self.log_dice:
dc_loss = -torch.log(-dc_loss)
ce_loss = self.ce(net_output, target[:, 0].long()) if self.weight_ce != 0 else 0
if self.ignore_label is not None:
ce_loss *= mask[:, 0]
ce_loss = ce_loss.sum() / mask.sum()
if self.aggregate == "sum":
result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
class ATM(nn.Module):
def __init__(self, apply_nonlin=None, weight_atm=0.5):
"""
"""
super(ATM, self).__init__()
self.apply_nonlin = apply_nonlin
self.weight_atm = weight_atm
def forward(self, x, y):
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
atm = torch.exp((x-y)/self.weight_atm)
return atm
class DC_and_BCE_loss(nn.Module):
def __init__(self, bce_kwargs, soft_dice_kwargs, aggregate="sum"):
"""
DO NOT APPLY NONLINEARITY IN YOUR NETWORK!
THIS LOSS IS INTENDED TO BE USED FOR BRATS REGIONS ONLY
:param soft_dice_kwargs:
:param bce_kwargs:
:param aggregate:
"""
super(DC_and_BCE_loss, self).__init__()
self.aggregate = aggregate
self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)
self.dc = SoftDiceLoss(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)
def forward(self, net_output, target):
ce_loss = self.ce(net_output, target)
dc_loss = self.dc(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
class GDL_and_CE_loss(nn.Module):
def __init__(self, gdl_dice_kwargs, ce_kwargs, aggregate="sum"):
super(GDL_and_CE_loss, self).__init__()
self.aggregate = aggregate
self.ce = RobustCrossEntropyLoss(**ce_kwargs)
self.dc = GDL(softmax_helper, **gdl_dice_kwargs)
def forward(self, net_output, target):
dc_loss = self.dc(net_output, target)
ce_loss = self.ce(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
class DC_and_topk_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum", square_dice=False):
super(DC_and_topk_loss, self).__init__()
self.aggregate = aggregate
self.ce = TopKLoss(**ce_kwargs)
if not square_dice:
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
else:
self.dc = SoftDiceLossSquared(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
dc_loss = self.dc(net_output, target)
ce_loss = self.ce(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later?)
return result
| [
"torch.zeros",
"torch.unbind",
"torch.no_grad",
"torch.nn.BCEWithLogitsLoss",
"torch.log",
"torch.exp"
] | 1.6.0 | Karol-G/nnUNet | a30bdbd64254c94c515ee03617173eb217eea505 |
1.7 | import torch
from torch.optim import Optimizer
class OptimWrapper(Optimizer):
# Mixin class that defines convenient functions for writing Optimizer Wrappers
def __init__(self, optim):
self.optim = optim
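        # All optimizer state and behavior is delegated to the wrapped optimizer instance.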
def __getstate__(self):
return self.optim.__getstate__()
def __setstate__(self, state):
self.optim.__setstate__(state)
@property
def state(self):
return self.optim.state
@property
def param_groups(self):
return self.optim.param_groups
@param_groups.setter
def param_groups(self, value):
self.optim.param_groups = value
def state_dict(self):
return self.optim.state_dict()
def load_state_dict(self, state_dict):
self.optim.load_state_dict(state_dict)
def zero_grad(self):
self.optim.zero_grad()
def add_param_group(self, param_group):
self.optim.add_param_group(param_group)
@property
def defaults(self):
return self.optim.defaults
@defaults.setter
def defaults(self, defaults):
self.optim.defaults = defaults
@torch.no_grad()
def step(self, closure=None):
self.optim.step(closure=closure)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.optim) | [
"torch.no_grad"
] | 1.7.1 | aknckaan/scrl | bff485e27d8785628e35d2cb73dce06f10065b1f |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CUTOFFBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_cutoffbert import CutoffBertConfig
from ..bert.modeling_bert import BertEmbeddings as CutoffBertEmbeddings
from ..bert.modeling_bert import BertEncoder as CutoffBertEncoder
from ..bert.modeling_bert import BertPooler as CutoffBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "CutoffBertConfig"
_TOKENIZER_FOR_DOC = "CutoffBertTokenizer"
CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_cutoffbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class CutoffBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = CutoffBertConfig
load_tf_weights = load_tf_weights_in_cutoffbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
CUTOFFBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
CUTOFFBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare CutoffBert Model transformer outputting raw hidden-states without any specific head on top.",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertModel(CutoffBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = CutoffBertEmbeddings(config)
self.encoder = CutoffBertEncoder(config)
self.pooler = CutoffBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
CutoffBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled
output) + Cut-off data augmentation support.
""",
CUTOFFBERT_START_DOCSTRING,
)
class CutoffBertForSequenceClassification(CutoffBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_token_id = config.cls_token_id
self.sep_token_id = config.sep_token_id
self.mask_token_id = config.mask_token_id
self.masking_prob = config.cutoff_masking_prob
self.temperature = config.cutoff_temperature
self.mask_loss_wgt = config.cutoff_mask_loss_wgt
self.js_loss_wgt = config.cutoff_js_loss_wgt
self.config = config
self.bert = CutoffBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def _apply_cutoff(self, inputs):
masked_inputs = inputs.clone()
valid_masking_indices = (inputs != self.cls_token_id) & (inputs != self.sep_token_id)
random_masking_indices = torch.bernoulli(torch.full(inputs.shape, self.masking_prob, device=inputs.device)).bool()
masking_indices = random_masking_indices & valid_masking_indices
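        # Cut-off augmentation: randomly replace a fraction (masking_prob) of tokens with the
        # mask token, never touching the [CLS] and [SEP] tokens.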
masked_inputs[masking_indices] = self.mask_token_id
return masked_inputs
@add_start_docstrings_to_model_forward(CUTOFFBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.dropout(outputs[1])
logits = self.classifier(pooled_output)
if not return_dict:
return (logits,) + outputs[2:]
return SequenceClassifierOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
b, l = input_ids.size()
masked_input_ids = self._apply_cutoff(input_ids.clone())
flatten_input_ids = torch.stack((input_ids, masked_input_ids), dim=1).reshape(-1, l)
flatten_attention_mask = attention_mask.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.unsqueeze(1).expand(-1, 2, -1).reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.unsqueeze(1).expand(-1, 2, -1, -1).reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
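        # The original and cutoff (masked) views are interleaved along the batch dimension so a
        # single encoder pass produces logits for both views.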
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.dropout(flatten_outputs[1])
flatten_logits = self.classifier(flatten_pooled_output)
logits, masked_logits = flatten_logits.reshape(b, 2, self.config.num_labels).chunk(2, dim=1)
logits, masked_logits = logits.squeeze(dim=1).contiguous(), masked_logits.squeeze(dim=1).contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if self.mask_loss_wgt is not None and self.mask_loss_wgt > 0.0:
mask_loss = loss_fct(masked_logits.view(-1, self.num_labels), labels.view(-1))
loss += mask_loss * self.mask_loss_wgt
if self.js_loss_wgt is not None and self.js_loss_wgt > 0.0:
kl_loss_fct = KLDivLoss(reduction="batchmean")
src_logits, trg_logits = logits, masked_logits
mean_logits = (src_logits + trg_logits) * 0.5
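            # Jensen-Shannon style consistency: each view's temperature-scaled distribution is
            # pulled toward the mean distribution of the two views.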
src_loss = kl_loss_fct(
F.log_softmax(src_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
trg_loss = kl_loss_fct(
F.log_softmax(trg_logits / self.temperature, dim=-1),
F.softmax(mean_logits / self.temperature, dim=-1)
) * (self.temperature ** 2)
js_loss = (src_loss + trg_loss) * 0.5
loss += js_loss * self.js_loss_wgt
if not return_dict:
return (loss, logits)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
)
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.stack",
"torch.from_numpy",
"torch.ones",
"torch.nn.functional.log_softmax",
"torch.full",
"torch.nn.KLDivLoss",
"torch.nn.functional.softmax",
"torch.nn.CrossEntropyLoss"
] | 1.0 | stevezheng23/fewshot_nlp_pt | aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2 |
1.0 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch PROMPTBERT model. """
import math
import os
import warnings
import numpy as np
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
import torch.nn.functional as F
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss, KLDivLoss
from torch.distributions.beta import Beta
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
DualPassageEncoderModelOutput,
)
from ...modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from ...utils import logging
from .configuration_promptbert import PromptBertConfig
from ..bert.modeling_bert import BertEmbeddings as PromptBertEmbeddings
from ..bert.modeling_bert import BertEncoder as PromptBertEncoder
from ..bert.modeling_bert import BertPooler as PromptBertPooler
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "PromptBertConfig"
_TOKENIZER_FOR_DOC = "PromptBertTokenizer"
PROMPTBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_promptbert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class PromptBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = PromptBertConfig
load_tf_weights = load_tf_weights_in_promptbert
base_model_prefix = "bert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
PROMPTBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
PROMPTBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare PromptBert Model transformer outputting raw hidden-states without any specific head on top.",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertModel(PromptBertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = PromptBertEmbeddings(config)
self.encoder = PromptBertEncoder(config)
self.pooler = PromptBertPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
PromptBert Model transformer with a sequence classification head on top (a linear layer on top of the pooled output).
""",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertForSequenceClassification(PromptBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = PromptBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a dual encoder head on top for passage retrieval tasks (a linear layer on top of the pooled output
for computing source-target similarity).
""",
PROMPTBERT_START_DOCSTRING,
)
class PromptBertForDualPassageEncoder(PromptBertPreTrainedModel):
def __init__(self, config, cls_loss_wgt=None):
super().__init__(config)
self.num_labels = config.num_labels
self.cls_loss_wgt = cls_loss_wgt
self.bert = PromptBertModel(config)
self.pooler = PromptBertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if self.cls_loss_wgt is not None and cls_loss_wgt > 0.0:
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(PROMPTBERT_INPUTS_DOCSTRING.format("batch_size, 2, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=DualPassageEncoderModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is None or len(input_ids.size()) < 3:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = self.pooler(outputs[0])
pooled_output = self.dropout(pooled_output)
if not return_dict:
return (pooled_output,) + outputs[2:]
return DualPassageEncoderModelOutput(
pooled_output=pooled_output,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
b, _, l = input_ids.size()
flatten_input_ids = input_ids.reshape(-1, l)
flatten_attention_mask = attention_mask.reshape(-1, l) if attention_mask is not None else None
flatten_token_type_ids = token_type_ids.reshape(-1, l) if token_type_ids is not None else None
flatten_position_ids = position_ids.reshape(-1, l) if position_ids is not None else None
flatten_inputs_embeds = inputs_embeds.reshape(-1, l, self.config.hidden_size) if inputs_embeds is not None else None
flatten_outputs = self.bert(
flatten_input_ids,
attention_mask=flatten_attention_mask,
token_type_ids=flatten_token_type_ids,
position_ids=flatten_position_ids,
head_mask=head_mask,
inputs_embeds=flatten_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
flatten_pooled_output = self.pooler(flatten_outputs[0])
src_pooled_output, trg_pooled_output = flatten_pooled_output.reshape(b, 2, self.config.hidden_size).chunk(2, dim=1)
src_pooled_output, trg_pooled_output = src_pooled_output.squeeze(dim=1).contiguous(), trg_pooled_output.squeeze(dim=1).contiguous()
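        # In-batch contrastive loss: score every source passage against every target passage, but mask
        # out off-diagonal pairs that share a label so they are not treated as negatives.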
mask = (labels.unsqueeze(-1).expand(-1, b) == labels.unsqueeze(0).expand(b, -1)) & (1 - torch.eye(b)).to(labels.device).bool()
cl_logits = torch.einsum('ik,jk->ij', src_pooled_output, trg_pooled_output).masked_fill(mask, float('-inf'))
cl_labels = torch.arange(b).to(labels.device)
loss_fct = CrossEntropyLoss()
cl_loss = loss_fct(cl_logits.view(-1, labels.size(0)), cl_labels.view(-1))
if self.cls_loss_wgt is not None and self.cls_loss_wgt > 0.0:
flatten_logits = self.classifier(self.dropout(flatten_outputs[1]))
src_logits, trg_logits = flatten_logits.reshape(b, 2, self.num_labels).chunk(2, dim=1)
src_logits, trg_logits = src_logits.squeeze(dim=1).contiguous(), trg_logits.squeeze(dim=1).contiguous()
src_loss = loss_fct(src_logits.view(-1, self.num_labels), labels.view(-1))
trg_loss = loss_fct(trg_logits.view(-1, self.num_labels), labels.view(-1))
cls_loss = src_loss + trg_loss
cls_logits = src_logits + trg_logits
loss = cl_loss + cls_loss * self.cls_loss_wgt
logits = cls_logits
else:
loss = cl_loss
logits = cl_logits
if not return_dict:
return (loss, logits,)
return DualPassageEncoderModelOutput(
loss=loss,
logits=logits,
)
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.einsum",
"torch.arange",
"torch.from_numpy",
"torch.ones",
"torch.eye",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.CrossEntropyLoss"
] | 1.0 | stevezheng23/fewshot_nlp_pt | aaca4658aaa48a5a45dfd7d5ee7282d7f7c74be2 |
1.5 | import torch
import numpy as np
def get_sigmas(config):
if config.model.sigma_dist == 'geometric':
sigmas = torch.tensor(
np.exp(np.linspace(np.log(config.model.sigma_begin), np.log(config.model.sigma_end),
config.model.num_classes))).float().to(config.device)
elif config.model.sigma_dist == 'uniform':
sigmas = torch.tensor(
np.linspace(config.model.sigma_begin, config.model.sigma_end, config.model.num_classes)
).float().to(config.device)
else:
raise NotImplementedError('sigma distribution not supported')
return sigmas
@torch.no_grad()
def anneal_Langevin_dynamics(x_mod, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False, denoise=True, add_noise=True):
images = []
with torch.no_grad():
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c #dummy target 1...T depending on iteration
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
grad = scorenet(x_mod, labels)
#choose whether to add random noise during each gradient ascent step
if add_noise:
noise = torch.randn_like(x_mod)
else:
noise = torch.zeros_like(x_mod)
#calculate l2 norms of gradient (score) and the additive noise for logging
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
#calc snr as scaled version of [||s(x, \sigma_i)|| / ||z_t||] and mean of score for logging
snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print("level: {}, step_size: {}, grad_norm: {}, image_norm: {}, snr: {}, grad_mean_norm: {}".format(
c, step_size, grad_norm.item(), image_norm.item(), snr.item(), grad_mean_norm.item()))
#final denoising step if desired - removes the very last additive z_L
if denoise:
last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)
last_noise = last_noise.long()
x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)
images.append(x_mod.to('cpu'))
if final_only:
return [x_mod.to('cpu')]
else:
return images
@torch.no_grad()
def langevin_Inverse(x_mod, y, A, scorenet, sigmas, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False, denoise=True, add_noise=True,
decimate_sigma=None, mode=None, true_x=None):
images = []
#if desired, decimate the number of noise scales to speed up inference
if decimate_sigma is not None:
sigmas_temp = sigmas[0:-1:decimate_sigma].tolist() #grab every decimate_sigma'th value except the last one
sigmas_temp.append(sigmas[-1]) #add the last sigma value back to the list
# num_sigmas = sigmas.shape[0] // decimate_sigma
# sigmas_temp = []
# for i in range(num_sigmas):
# sigmas_temp.append(sigmas[-1])
sigmas = sigmas_temp #swap the new decimated sigma list for the main one
mse = torch.nn.MSELoss()
N, C, H, W = x_mod.shape
steps = np.geomspace(start=5, stop=1, num=len(sigmas))
c2 = 1
with torch.no_grad():
#outer loop over noise scales
for c, sigma in enumerate(sigmas):
#dummy target 1...T depending on iteration
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
#step_size = step_lr * (sigma / sigmas[-1]) ** 2
step_size = steps[c]
#Inner loop over T
for s in range(n_steps_each):
#s(x_t) ~= \grad_x log p(x) -- THE PRIOR
grad = scorenet(x_mod, labels)
prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
#prior_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2 * sigma ** 2
#calculate the maximum likelihood gradient - i.e. MSE gradient
#A should be [N, m, C * H * W], x should be [N, C, H, W], y should be [N, m, 1]
if mode=='denoising':
Axt = x_mod
mle_grad = (Axt - y) * (1 / N) #for denoising, y has same dimension as x
else:
Axt = torch.matmul(A, x_mod.view(N, -1, 1))
mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2 #MSE gradient
#mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * (1 / N) #L1 error gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
#likelihood_mean_norm = torch.norm(mle_grad.mean(dim=0).view(-1)) ** 2
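                # On the very first inner step, rescale the likelihood gradient so its norm matches
                # the prior (score) gradient norm; the same scale c2 is reused for later steps.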
if c == 0 and s == 0:
c2 = prior_norm.item() / likelihood_norm.item()
mle_grad = mle_grad * c2 #MSE gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
#The final gradient
grad = grad - mle_grad
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
#grad_mean_norm = torch.norm(grad.mean(dim=0).view(-1)) ** 2
#choose whether to add random noise during each gradient ascent step
if add_noise:
noise = torch.randn_like(x_mod)
else:
noise = torch.zeros_like(x_mod)
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2) #core Langevin step
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
snr = np.sqrt(step_size / 2.) * prior_norm / noise_norm
mse_iter = mse(Axt, y)
if true_x is not None:
mse_true = mse(true_x, x_mod)
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print("\nlevel: {}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \
image_norm: {:.4f}, train_mse: {:.4f}".format( \
c, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \
mse_iter.item()))
if true_x is not None:
print("true_mse: {:.4f}".format(mse_true.item()))
#final denoising step if desired - removes the very last additive z_L
if denoise:
last_noise = (len(sigmas) - 1) * torch.ones(x_mod.shape[0], device=x_mod.device)
last_noise = last_noise.long()
x_mod = x_mod + sigmas[-1] ** 2 * scorenet(x_mod, last_noise)
images.append(x_mod.to('cpu'))
if final_only:
return [x_mod.to('cpu')]
else:
return images
@torch.no_grad()
def inverse_solver(x_mod, y, A, scorenet, sigmas, lr = [5, 1], c1=1, c2=1, auto_c2=True,
final_only=False, verbose=False, likelihood_every=1,
decimate_sigma=None, mode=None, true_x=None, sigma_type = 'subsample', likelihood_type="l2"):
images = []
#if desired, decimate the number of noise scales to speed up inference
if decimate_sigma is not None:
if sigma_type == 'subsample': #grab equally-spaced sigma values
sigmas_temp = sigmas[0:-1:decimate_sigma].tolist()
sigmas_temp.append(sigmas[-1])
elif sigma_type == 'last': #grab just the last sigma value multiple times
num_sigmas = sigmas.shape[0] // decimate_sigma
sigmas_temp = []
for i in range(num_sigmas):
sigmas_temp.append(sigmas[-1])
else:
sigmas_temp = sigmas
sigmas = sigmas_temp
mse = torch.nn.MSELoss()
N, C, H, W = x_mod.shape
steps = np.geomspace(start=lr[0], stop=lr[1], num=len(sigmas))
likelihood_norm = 0
with torch.no_grad():
if sigma_type == 'last':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * 1099
labels = labels.long()
for c, sigma in enumerate(sigmas):
if sigma_type == 'subsample':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * decimate_sigma * c
labels = labels.long()
elif sigma_type != 'last':
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = steps[c]
#s(x_t) ~= \grad_x log p(x) -- THE PRIOR
grad = scorenet(x_mod, labels) * c1
prior_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
if c % likelihood_every == 0:
#\grad_x log p(y | x) -- LIKELIHOOD
if mode=='denoising':
Axt = x_mod
if likelihood_type == "l2":
mle_grad = (Axt - y) * c2
elif likelihood_type == "l1":
mle_grad = torch.sign(Axt - y) * c2
else:
Axt = torch.matmul(A, x_mod.view(N, -1, 1))
if likelihood_type == "l2":
mle_grad = torch.matmul(torch.transpose(A, -2, -1), Axt - y).view(N, C, H, W) * c2
elif likelihood_type == "l1":
mle_grad = torch.matmul(torch.transpose(A, -2, -1), torch.sign(Axt - y)).view(N, C, H, W) * c2
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
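                # Optionally set c2 on the first noise level so the likelihood gradient's norm
                # matches the prior (score) gradient's norm, balancing the two terms.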
if auto_c2 and c == 0:
c2 = prior_norm.item() / likelihood_norm.item()
mle_grad = mle_grad * c2 #MSE gradient
likelihood_norm = torch.norm(mle_grad.view(mle_grad.shape[0], -1), dim=-1).mean()
grad = grad - mle_grad
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad
#x_mod = torch.clamp(x_mod, 0.0, 1.0)
#calc l2 norm of iterate variable for logging
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
mse_iter = mse(Axt, y)
if true_x is not None:
mse_true = mse(true_x, x_mod)
if not final_only:
images.append(x_mod.cpu())
if verbose:
print("\n iteration: {}, sigma: {:.4f}, step_size: {:.4f}, prior_norm: {:.4f}, likelihood_norm: {:.4f}, grad_norm: {:.4f} \
image_norm: {:.4f}, train_mse: {:.4f}".format( \
c, sigma, step_size, prior_norm.item(), likelihood_norm.item(), grad_norm.item(), image_norm.item(), \
mse_iter.item()))
if true_x is not None:
print("true_mse: {:.4f}".format(mse_true.item()))
if final_only:
return [x_mod.to('cpu')]
else:
return images
@torch.no_grad()
def anneal_Langevin_dynamics_inpainting(x_mod, refer_image, scorenet, sigmas, image_size,
n_steps_each=100, step_lr=0.000008):
"""
Currently only good for 32x32 images. Assuming the right half is missing.
"""
images = []
#refer_image is the untainted x (?)
#right now this only works with 3-channel images
refer_image = refer_image.unsqueeze(1).expand(-1, x_mod.shape[1], -1, -1, -1)
refer_image = refer_image.contiguous().view(-1, 3, image_size, image_size)
x_mod = x_mod.view(-1, 3, image_size, image_size)
cols = image_size // 2
half_refer_image = refer_image[..., :cols]
with torch.no_grad():
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
images.append(x_mod.to('cpu'))
corrupted_half_image = half_refer_image + torch.randn_like(half_refer_image) * sigma
x_mod[:, :, :, :cols] = corrupted_half_image
noise = torch.randn_like(x_mod) * np.sqrt(step_size * 2)
grad = scorenet(x_mod, labels)
x_mod = x_mod + step_size * grad + noise
print("class: {}, step_size: {}, mean {}, max {}".format(c, step_size, grad.abs().mean(),
grad.abs().max()))
return images
@torch.no_grad()
def anneal_Langevin_dynamics_interpolation(x_mod, scorenet, sigmas, n_interpolations, n_steps_each=200, step_lr=0.000008,
final_only=False, verbose=False):
images = []
n_rows = x_mod.shape[0]
x_mod = x_mod[:, None, ...].repeat(1, n_interpolations, 1, 1, 1)
x_mod = x_mod.reshape(-1, *x_mod.shape[2:])
for c, sigma in enumerate(sigmas):
labels = torch.ones(x_mod.shape[0], device=x_mod.device) * c
labels = labels.long()
step_size = step_lr * (sigma / sigmas[-1]) ** 2
for s in range(n_steps_each):
grad = scorenet(x_mod, labels)
noise_p = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
device=x_mod.device)
noise_q = torch.randn(n_rows, x_mod.shape[1], x_mod.shape[2], x_mod.shape[3],
device=x_mod.device)
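            # Interpolate between two independent noise draws along a quarter circle: each of the
            # n_interpolations columns uses cos(theta) * noise_p + sin(theta) * noise_q.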
angles = torch.linspace(0, np.pi / 2., n_interpolations, device=x_mod.device)
noise = noise_p[:, None, ...] * torch.cos(angles)[None, :, None, None, None] + \
noise_q[:, None, ...] * torch.sin(angles)[None, :, None, None, None]
noise = noise.reshape(-1, *noise.shape[2:])
grad_norm = torch.norm(grad.view(grad.shape[0], -1), dim=-1).mean()
noise_norm = torch.norm(noise.view(noise.shape[0], -1), dim=-1).mean()
image_norm = torch.norm(x_mod.view(x_mod.shape[0], -1), dim=-1).mean()
x_mod = x_mod + step_size * grad + noise * np.sqrt(step_size * 2)
snr = np.sqrt(step_size / 2.) * grad_norm / noise_norm
if not final_only:
images.append(x_mod.to('cpu'))
if verbose:
print(
"level: {}, step_size: {}, image_norm: {}, grad_norm: {}, snr: {}".format(
c, step_size, image_norm.item(), grad_norm.item(), snr.item()))
if final_only:
return [x_mod.to('cpu')]
else:
return images | [
"torch.cos",
"torch.nn.MSELoss",
"torch.sin",
"torch.no_grad",
"torch.linspace",
"torch.sign",
"torch.ones",
"torch.randn_like",
"torch.zeros_like",
"torch.transpose",
"torch.randn"
] | 1.5.0 | Sriram-Ravula/ncsnv2 | f610b59441a34063fae1c02aa06837b7eec95c03 |