repo_id
stringlengths 21
96
| file_path
stringlengths 31
155
| content
stringlengths 1
92.9M
| __index_level_0__
int64 0
0
|
---|---|---|---|
rapidsai_public_repos/rmm/python
|
rapidsai_public_repos/rmm/python/rmm/rmm.py
|
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rmm import mr
# Utility Functions
class RMMError(Exception):
def __init__(self, errcode, msg):
self.errcode = errcode
super(RMMError, self).__init__(msg)
_reinitialize_hooks = []
def reinitialize(
pool_allocator=False,
managed_memory=False,
initial_pool_size=None,
maximum_pool_size=None,
devices=0,
logging=False,
log_file_name=None,
):
"""
Finalizes and then initializes RMM using the options passed. Using memory
from a previous initialization of RMM is undefined behavior and should be
avoided.
Parameters
----------
pool_allocator : bool, default False
If True, use a pool allocation strategy which can greatly improve
performance.
managed_memory : bool, default False
If True, use managed memory for device memory allocation
initial_pool_size : int, default None
When `pool_allocator` is True, this indicates the initial pool size in
bytes. By default, 1/2 of the total GPU memory is used.
When `pool_allocator` is False, this argument is ignored if provided.
maximum_pool_size : int, default None
When `pool_allocator` is True, this indicates the maximum pool size in
bytes. By default, the total available memory on the GPU is used.
When `pool_allocator` is False, this argument is ignored if provided.
devices : int or List[int], default 0
GPU device IDs to register. By default registers only GPU 0.
logging : bool, default False
If True, enable run-time logging of all memory events
(alloc, free, realloc).
This has a significant performance impact.
log_file_name : str
Name of the log file. If not specified, the environment variable
``RMM_LOG_FILE`` is used. A ``ValueError`` is thrown if neither is
available. A separate log file is produced for each device, and the
suffix `".dev{id}"` is automatically added to the log file name.
Notes
-----
Note that if you use the environment variable ``CUDA_VISIBLE_DEVICES`` with
logging enabled, the suffix may not be what you expect. For example, if you
set ``CUDA_VISIBLE_DEVICES=1``, the log file produced will still have
suffix ``0``. Similarly, if you set ``CUDA_VISIBLE_DEVICES=1,0`` and use
devices 0 and 1, the log file with suffix ``0`` will correspond to the GPU
with device ID ``1``. Use `rmm.get_log_filenames()` to get the log file
names corresponding to each device.
"""
for func, args, kwargs in reversed(_reinitialize_hooks):
func(*args, **kwargs)
mr._initialize(
pool_allocator=pool_allocator,
managed_memory=managed_memory,
initial_pool_size=initial_pool_size,
maximum_pool_size=maximum_pool_size,
devices=devices,
logging=logging,
log_file_name=log_file_name,
)
def is_initialized():
"""
Returns True if RMM has been initialized, False otherwise.
"""
return mr.is_initialized()
def register_reinitialize_hook(func, *args, **kwargs):
"""
Add a function to the list of functions ("hooks") that will be
called before :py:func:`~rmm.reinitialize()`.
A user or library may register hooks to perform any necessary
cleanup before RMM is reinitialized. For example, a library with
an internal cache of objects that use device memory allocated by
RMM can register a hook to release those references before RMM is
reinitialized, thus ensuring that the relevant device memory
resource can be deallocated.
Hooks are called in the *reverse* order they are registered. This
is useful, for example, when a library registers multiple hooks
and needs them to run in a specific order for cleanup to be safe.
Hooks cannot rely on being registered in a particular order
relative to hooks registered by other packages, since that is
determined by package import ordering.
Parameters
----------
func : callable
Function to be called before :py:func:`~rmm.reinitialize()`
args, kwargs
Positional and keyword arguments to be passed to `func`
"""
global _reinitialize_hooks
_reinitialize_hooks.append((func, args, kwargs))
return func
def unregister_reinitialize_hook(func):
"""
Remove `func` from list of hooks that will be called before
:py:func:`~rmm.reinitialize()`.
If `func` was registered more than once, every instance of it will
be removed from the list of hooks.
"""
global _reinitialize_hooks
_reinitialize_hooks = [x for x in _reinitialize_hooks if x[0] != func]
| 0 |
rapidsai_public_repos/rmm/python
|
rapidsai_public_repos/rmm/python/rmm/mr.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rmm._lib.memory_resource import (
BinningMemoryResource,
CallbackMemoryResource,
CudaAsyncMemoryResource,
CudaMemoryResource,
DeviceMemoryResource,
FailureCallbackResourceAdaptor,
FixedSizeMemoryResource,
LimitingResourceAdaptor,
LoggingResourceAdaptor,
ManagedMemoryResource,
PoolMemoryResource,
StatisticsResourceAdaptor,
TrackingResourceAdaptor,
UpstreamResourceAdaptor,
_flush_logs,
_initialize,
disable_logging,
enable_logging,
get_current_device_resource,
get_current_device_resource_type,
get_log_filenames,
get_per_device_resource,
get_per_device_resource_type,
is_initialized,
set_current_device_resource,
set_per_device_resource,
)
__all__ = [
"BinningMemoryResource",
"CallbackMemoryResource",
"CudaAsyncMemoryResource",
"CudaMemoryResource",
"DeviceMemoryResource",
"FixedSizeMemoryResource",
"LimitingResourceAdaptor",
"LoggingResourceAdaptor",
"ManagedMemoryResource",
"PoolMemoryResource",
"StatisticsResourceAdaptor",
"TrackingResourceAdaptor",
"FailureCallbackResourceAdaptor",
"UpstreamResourceAdaptor",
"_flush_logs",
"_initialize",
"set_per_device_resource",
"enable_logging",
"disable_logging",
"get_per_device_resource",
"set_current_device_resource",
"get_current_device_resource",
"get_per_device_resource_type",
"get_current_device_resource_type",
"get_log_filenames",
"is_initialized",
]
| 0 |
rapidsai_public_repos/rmm/python
|
rapidsai_public_repos/rmm/python/rmm/__init__.py
|
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rmm import mr
from rmm._lib.device_buffer import DeviceBuffer
from rmm._lib.logger import (
flush_logger,
get_flush_level,
get_logging_level,
logging_level,
set_flush_level,
set_logging_level,
should_log,
)
from rmm._version import __git_commit__, __version__
from rmm.mr import disable_logging, enable_logging, get_log_filenames
from rmm.rmm import (
RMMError,
is_initialized,
register_reinitialize_hook,
reinitialize,
unregister_reinitialize_hook,
)
__all__ = [
"DeviceBuffer",
"disable_logging",
"RMMError",
"enable_logging",
"flush_logger",
"get_flush_level",
"get_log_filenames",
"get_logging_level",
"is_initialized",
"logging_level",
"mr",
"register_reinitialize_hook",
"reinitialize",
"set_flush_level",
"set_logging_level",
"should_log",
"unregister_reinitialize_hook",
]
| 0 |
rapidsai_public_repos/rmm/python
|
rapidsai_public_repos/rmm/python/rmm/VERSION
|
24.02.00
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_cuda/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
set(cython_sources stream.pyx)
set(linked_libraries rmm::rmm)
rapids_cython_create_modules(SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}"
CXX)
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_cuda/gpu.py
|
# Copyright (c) 2020, NVIDIA CORPORATION.
from cuda import cuda, cudart
class CUDARuntimeError(RuntimeError):
def __init__(self, status: cudart.cudaError_t):
self.status = status
_, name = cudart.cudaGetErrorName(status)
_, msg = cudart.cudaGetErrorString(status)
super(CUDARuntimeError, self).__init__(
f"{name.decode()}: {msg.decode()}"
)
def __reduce__(self):
return (type(self), (self.status,))
class CUDADriverError(RuntimeError):
def __init__(self, status: cuda.CUresult):
self.status = status
_, name = cuda.cuGetErrorName(status)
_, msg = cuda.cuGetErrorString(status)
super(CUDADriverError, self).__init__(
f"{name.decode()}: {msg.decode()}"
)
def __reduce__(self):
return (type(self), (self.status,))
def driverGetVersion():
"""
Returns in the latest version of CUDA supported by the driver.
The version is returned as (1000 major + 10 minor). For example,
CUDA 9.2 would be represented by 9020. If no driver is installed,
then 0 is returned as the driver version.
This function automatically raises CUDARuntimeError with error message
and status code.
"""
status, version = cudart.cudaDriverGetVersion()
if status != cudart.cudaError_t.cudaSuccess:
raise CUDARuntimeError(status)
return version
def getDevice():
"""
Get the current CUDA device
"""
status, device = cudart.cudaGetDevice()
if status != cudart.cudaError_t.cudaSuccess:
raise CUDARuntimeError(status)
return device
def setDevice(device: int):
"""
Set the current CUDA device
Parameters
----------
device : int
The ID of the device to set as current
"""
(status,) = cudart.cudaSetDevice(device)
if status != cudart.cudaError_t.cudaSuccess:
raise CUDARuntimeError(status)
def runtimeGetVersion():
"""
Returns the version number of the current CUDA Runtime instance.
The version is returned as (1000 major + 10 minor). For example,
CUDA 9.2 would be represented by 9020.
This calls numba.cuda.runtime.get_version() rather than cuda-python due to
current limitations in cuda-python.
"""
# TODO: Replace this with `cuda.cudart.cudaRuntimeGetVersion()` when the
# limitation is fixed.
import numba.cuda
major, minor = numba.cuda.runtime.get_version()
return major * 1000 + minor * 10
def getDeviceCount():
"""
Returns the number of devices with compute capability greater or
equal to 2.0 that are available for execution.
This function automatically raises CUDARuntimeError with error message
and status code.
"""
status, count = cudart.cudaGetDeviceCount()
if status != cudart.cudaError_t.cudaSuccess:
raise CUDARuntimeError(status)
return count
def getDeviceAttribute(attr: cudart.cudaDeviceAttr, device: int):
"""
Returns information about the device.
Parameters
----------
attr : cudaDeviceAttr
Device attribute to query
device : int
Device number to query
This function automatically raises CUDARuntimeError with error message
and status code.
"""
status, value = cudart.cudaDeviceGetAttribute(attr, device)
if status != cudart.cudaError_t.cudaSuccess:
raise CUDARuntimeError(status)
return value
def getDeviceProperties(device: int):
"""
Returns information about the compute-device.
Parameters
----------
device : int
Device number to query
This function automatically raises CUDARuntimeError with error message
and status code.
"""
status, prop = cudart.cudaGetDeviceProperties(device)
if status != cudart.cudaError_t.cudaSuccess:
raise CUDARuntimeError(status)
return prop
def deviceGetName(device: int):
"""
Returns an identifier string for the device.
Parameters
----------
device : int
Device number to query
This function automatically raises CUDADriverError with error message
and status code.
"""
status, device_name = cuda.cuDeviceGetName(256, cuda.CUdevice(device))
if status != cuda.CUresult.CUDA_SUCCESS:
raise CUDADriverError(status)
return device_name.decode()
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_cuda/stream.pyx
|
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuda.ccudart cimport cudaStream_t
from libc.stdint cimport uintptr_t
from libcpp cimport bool
from rmm._lib.cuda_stream cimport CudaStream
from rmm._lib.cuda_stream_view cimport (
cuda_stream_default,
cuda_stream_legacy,
cuda_stream_per_thread,
cuda_stream_view,
)
cdef class Stream:
def __init__(self, obj=None):
"""
A Stream represents a CUDA stream.
Parameters
----------
obj: optional
* If None (the default), a new CUDA stream is created.
* If a Numba or CuPy stream is provided, we make a thin
wrapper around it.
"""
if obj is None:
self._init_with_new_cuda_stream()
elif isinstance(obj, Stream):
self._init_from_stream(obj)
else:
try:
self._init_from_numba_stream(obj)
except TypeError:
self._init_from_cupy_stream(obj)
@staticmethod
cdef Stream _from_cudaStream_t(cudaStream_t s, object owner=None) except *:
"""
Construct a Stream from a cudaStream_t.
"""
cdef Stream obj = Stream.__new__(Stream)
obj._cuda_stream = s
obj._owner = owner
return obj
cdef cuda_stream_view view(self) except * nogil:
"""
Generate a rmm::cuda_stream_view from this Stream instance
"""
return cuda_stream_view(<cudaStream_t><uintptr_t>(self._cuda_stream))
cdef void c_synchronize(self) except * nogil:
"""
Synchronize the CUDA stream.
This function *must* be called in a `with nogil` block
"""
self.view().synchronize()
def synchronize(self):
"""
Synchronize the CUDA stream
"""
with nogil:
self.c_synchronize()
cdef bool c_is_default(self) except * nogil:
"""
Check if we are the default CUDA stream
"""
return self.view().is_default()
def is_default(self):
"""
Check if we are the default CUDA stream
"""
return self.c_is_default()
def _init_from_numba_stream(self, obj):
from numba import cuda
if isinstance(obj, cuda.cudadrv.driver.Stream):
self._cuda_stream = <cudaStream_t><uintptr_t>(int(obj))
self._owner = obj
else:
raise TypeError(f"Cannot create stream from {type(obj)}")
def _init_from_cupy_stream(self, obj):
try:
import cupy
if isinstance(obj, cupy.cuda.stream.Stream):
self._cuda_stream = <cudaStream_t><uintptr_t>(obj.ptr)
self._owner = obj
return
except ImportError:
pass
raise TypeError(f"Cannot create stream from {type(obj)}")
cdef void _init_with_new_cuda_stream(self) except *:
cdef CudaStream stream = CudaStream()
self._cuda_stream = stream.value()
self._owner = stream
cdef void _init_from_stream(self, Stream stream) except *:
self._cuda_stream, self._owner = stream._cuda_stream, stream._owner
DEFAULT_STREAM = Stream._from_cudaStream_t(cuda_stream_default.value())
LEGACY_DEFAULT_STREAM = Stream._from_cudaStream_t(cuda_stream_legacy.value())
PER_THREAD_DEFAULT_STREAM = Stream._from_cudaStream_t(
cuda_stream_per_thread.value()
)
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_cuda/stream.pxd
|
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuda.ccudart cimport cudaStream_t
from libc.stdint cimport uintptr_t
from libcpp cimport bool
from rmm._lib.cuda_stream_view cimport cuda_stream_view
cdef class Stream:
cdef cudaStream_t _cuda_stream
cdef object _owner
@staticmethod
cdef Stream _from_cudaStream_t(cudaStream_t s, object owner=*)
cdef cuda_stream_view view(self) except * nogil
cdef void c_synchronize(self) except * nogil
cdef bool c_is_default(self) except * nogil
cdef void _init_with_new_cuda_stream(self) except *
cdef void _init_from_stream(self, Stream stream) except *
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/tests/test_rmm_pytorch.py
|
import gc
import pytest
from rmm.allocators.torch import rmm_torch_allocator
torch = pytest.importorskip("torch")
@pytest.fixture(scope="session")
def torch_allocator():
try:
from torch.cuda.memory import change_current_allocator
except ImportError:
pytest.skip("pytorch pluggable allocator not available")
change_current_allocator(rmm_torch_allocator)
def test_rmm_torch_allocator(torch_allocator, stats_mr):
assert stats_mr.allocation_counts["current_bytes"] == 0
x = torch.tensor([1, 2]).cuda()
assert stats_mr.allocation_counts["current_bytes"] > 0
del x
gc.collect()
assert stats_mr.allocation_counts["current_bytes"] == 0
def test_rmm_torch_allocator_using_stream(torch_allocator, stats_mr):
assert stats_mr.allocation_counts["current_bytes"] == 0
s = torch.cuda.Stream()
with torch.cuda.stream(s):
x = torch.tensor([1, 2]).cuda()
torch.cuda.current_stream().wait_stream(s)
assert stats_mr.allocation_counts["current_bytes"] > 0
del x
gc.collect()
assert stats_mr.allocation_counts["current_bytes"] == 0
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/tests/test_rmm.py
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import gc
import os
import pickle
import warnings
from itertools import product
import numpy as np
import pytest
from numba import cuda
import rmm
import rmm._cuda.stream
from rmm.allocators.cupy import rmm_cupy_allocator
from rmm.allocators.numba import RMMNumbaManager
cuda.set_memory_manager(RMMNumbaManager)
_driver_version = rmm._cuda.gpu.driverGetVersion()
_runtime_version = rmm._cuda.gpu.runtimeGetVersion()
_CUDAMALLOC_ASYNC_SUPPORTED = (_driver_version >= 11020) and (
_runtime_version >= 11020
)
def array_tester(dtype, nelem, alloc):
# data
h_in = np.full(nelem, 3.2, dtype)
h_result = np.empty(nelem, dtype)
d_in = alloc.to_device(h_in)
d_result = alloc.device_array_like(d_in)
d_result.copy_to_device(d_in)
h_result = d_result.copy_to_host()
np.testing.assert_array_equal(h_result, h_in)
_dtypes = [
np.int8,
np.int16,
np.int32,
np.int64,
np.float32,
np.float64,
np.bool_,
]
_nelems = [1, 2, 7, 8, 9, 32, 128]
_allocs = [cuda]
@pytest.mark.parametrize("dtype", _dtypes)
@pytest.mark.parametrize("nelem", _nelems)
@pytest.mark.parametrize("alloc", _allocs)
def test_rmm_alloc(dtype, nelem, alloc):
array_tester(dtype, nelem, alloc)
# Test all combinations of default/managed and pooled/non-pooled allocation
@pytest.mark.parametrize("dtype", _dtypes)
@pytest.mark.parametrize("nelem", _nelems)
@pytest.mark.parametrize("alloc", _allocs)
@pytest.mark.parametrize(
"managed, pool", list(product([False, True], [False, True]))
)
def test_rmm_modes(dtype, nelem, alloc, managed, pool):
assert rmm.is_initialized()
array_tester(dtype, nelem, alloc)
rmm.reinitialize(pool_allocator=pool, managed_memory=managed)
assert rmm.is_initialized()
array_tester(dtype, nelem, alloc)
@pytest.mark.parametrize("dtype", _dtypes)
@pytest.mark.parametrize("nelem", _nelems)
@pytest.mark.parametrize("alloc", _allocs)
def test_rmm_csv_log(dtype, nelem, alloc, tmpdir):
suffix = ".csv"
base_name = str(tmpdir.join("rmm_log.csv"))
rmm.reinitialize(logging=True, log_file_name=base_name)
array_tester(dtype, nelem, alloc)
rmm.mr._flush_logs()
# Need to open separately because the device ID is appended to filename
fname = base_name[: -len(suffix)] + ".dev0" + suffix
try:
with open(fname, "rb") as f:
csv = f.read()
assert csv.find(b"Time,Action,Pointer,Size,Stream") >= 0
finally:
os.remove(fname)
@pytest.mark.parametrize("size", [0, 5])
def test_rmm_device_buffer(size):
b = rmm.DeviceBuffer(size=size)
# Test some properties
if size:
assert b.ptr != 0
assert b.size == size
else:
assert b.ptr == 0
assert b.size == 0
assert len(b) == b.size
assert b.nbytes == b.size
assert b.capacity() >= b.size
assert b.__sizeof__() == b.size
# Test `__cuda_array_interface__`
keyset = {"data", "shape", "strides", "typestr", "version"}
assert isinstance(b.__cuda_array_interface__, dict)
assert set(b.__cuda_array_interface__) == keyset
assert b.__cuda_array_interface__["data"] == (b.ptr, False)
assert b.__cuda_array_interface__["shape"] == (b.size,)
assert b.__cuda_array_interface__["strides"] is None
assert b.__cuda_array_interface__["typestr"] == "|u1"
assert b.__cuda_array_interface__["version"] == 0
# Test conversion to bytes
s = b.tobytes()
assert isinstance(s, bytes)
assert len(s) == len(b)
# Test conversion from bytes
b2 = rmm.DeviceBuffer.to_device(s)
assert isinstance(b2, rmm.DeviceBuffer)
assert len(b2) == len(s)
# Test resizing
b.resize(2)
assert b.size == 2
assert b.capacity() >= b.size
@pytest.mark.parametrize(
"hb",
[
b"abc",
bytearray(b"abc"),
memoryview(b"abc"),
np.asarray(memoryview(b"abc")),
np.arange(3, dtype="u1"),
],
)
def test_rmm_device_buffer_memoryview_roundtrip(hb):
mv = memoryview(hb)
db = rmm.DeviceBuffer.to_device(hb)
hb2 = db.copy_to_host()
assert isinstance(hb2, np.ndarray)
mv2 = memoryview(hb2)
assert mv == mv2
hb3a = bytearray(mv.nbytes)
hb3b = db.copy_to_host(hb3a)
assert hb3a is hb3b
mv3 = memoryview(hb3b)
assert mv == mv3
hb4a = np.empty_like(mv)
hb4b = db.copy_to_host(hb4a)
assert hb4a is hb4b
mv4 = memoryview(hb4b)
assert mv == mv4
@pytest.mark.parametrize(
"hb",
[
None,
"abc",
123,
b"",
np.ones((2,), "u2"),
np.ones((2, 2), "u1"),
np.ones(4, "u1")[::2],
b"abc",
bytearray(b"abc"),
memoryview(b"abc"),
np.asarray(memoryview(b"abc")),
np.arange(3, dtype="u1"),
],
)
def test_rmm_device_buffer_bytes_roundtrip(hb):
try:
mv = memoryview(hb)
except TypeError:
with pytest.raises(TypeError):
rmm.DeviceBuffer.to_device(hb)
else:
if mv.format != "B":
with pytest.raises(ValueError):
rmm.DeviceBuffer.to_device(hb)
elif len(mv.strides) != 1:
with pytest.raises(ValueError):
rmm.DeviceBuffer.to_device(hb)
elif mv.strides[0] != 1:
with pytest.raises(ValueError):
rmm.DeviceBuffer.to_device(hb)
else:
db = rmm.DeviceBuffer.to_device(hb)
hb2 = db.tobytes()
mv2 = memoryview(hb2)
assert mv == mv2
hb3 = bytes(db)
mv3 = memoryview(hb3)
assert mv == mv3
@pytest.mark.parametrize(
"hb",
[
b"abc",
bytearray(b"abc"),
memoryview(b"abc"),
np.asarray(memoryview(b"abc")),
np.array([97, 98, 99], dtype="u1"),
],
)
def test_rmm_device_buffer_copy_from_host(hb):
db = rmm.DeviceBuffer.to_device(np.zeros(10, dtype="u1"))
db.copy_from_host(hb)
expected = np.array([97, 98, 99, 0, 0, 0, 0, 0, 0, 0], dtype="u1")
result = db.copy_to_host()
np.testing.assert_equal(expected, result)
@pytest.mark.parametrize(
"cuda_ary",
[
lambda: rmm.DeviceBuffer.to_device(b"abc"),
lambda: cuda.to_device(np.array([97, 98, 99], dtype="u1")),
],
)
def test_rmm_device_buffer_copy_from_device(cuda_ary):
cuda_ary = cuda_ary()
db = rmm.DeviceBuffer.to_device(np.zeros(10, dtype="u1"))
db.copy_from_device(cuda_ary)
expected = np.array([97, 98, 99, 0, 0, 0, 0, 0, 0, 0], dtype="u1")
result = db.copy_to_host()
np.testing.assert_equal(expected, result)
@pytest.mark.parametrize("hb", [b"", b"123", b"abc"])
def test_rmm_device_buffer_pickle_roundtrip(hb):
db = rmm.DeviceBuffer.to_device(hb)
pb = pickle.dumps(db)
del db
db2 = pickle.loads(pb)
hb2 = db2.tobytes()
assert hb == hb2
# out-of-band
db = rmm.DeviceBuffer.to_device(hb)
buffers = []
pb2 = pickle.dumps(db, protocol=5, buffer_callback=buffers.append)
del db
assert len(buffers) == 1
assert isinstance(buffers[0], pickle.PickleBuffer)
assert bytes(buffers[0]) == hb
db3 = pickle.loads(pb2, buffers=buffers)
hb3 = db3.tobytes()
assert hb3 == hb
@pytest.mark.parametrize("stream", [cuda.default_stream(), cuda.stream()])
def test_rmm_pool_numba_stream(stream):
rmm.reinitialize(pool_allocator=True)
stream = rmm._cuda.stream.Stream(stream)
a = rmm._lib.device_buffer.DeviceBuffer(size=3, stream=stream)
assert a.size == 3
assert a.ptr != 0
def test_rmm_cupy_allocator():
cupy = pytest.importorskip("cupy")
m = rmm_cupy_allocator(42)
assert m.mem.size == 42
assert m.mem.ptr != 0
assert isinstance(m.mem._owner, rmm.DeviceBuffer)
m = rmm_cupy_allocator(0)
assert m.mem.size == 0
assert m.mem.ptr == 0
assert isinstance(m.mem._owner, rmm.DeviceBuffer)
cupy.cuda.set_allocator(rmm_cupy_allocator)
a = cupy.arange(10)
assert isinstance(a.data.mem._owner, rmm.DeviceBuffer)
@pytest.mark.parametrize("stream", ["null", "async"])
def test_rmm_pool_cupy_allocator_with_stream(stream):
cupy = pytest.importorskip("cupy")
rmm.reinitialize(pool_allocator=True)
cupy.cuda.set_allocator(rmm_cupy_allocator)
if stream == "null":
stream = cupy.cuda.stream.Stream.null
else:
stream = cupy.cuda.stream.Stream()
with stream:
m = rmm_cupy_allocator(42)
assert m.mem.size == 42
assert m.mem.ptr != 0
assert isinstance(m.mem._owner, rmm.DeviceBuffer)
m = rmm_cupy_allocator(0)
assert m.mem.size == 0
assert m.mem.ptr == 0
assert isinstance(m.mem._owner, rmm.DeviceBuffer)
a = cupy.arange(10)
assert isinstance(a.data.mem._owner, rmm.DeviceBuffer)
# Deleting all allocations known by the RMM pool is required
# before rmm.reinitialize(), otherwise it may segfault.
del a
rmm.reinitialize()
def test_rmm_pool_cupy_allocator_stream_lifetime():
cupy = pytest.importorskip("cupy")
rmm.reinitialize(pool_allocator=True)
cupy.cuda.set_allocator(rmm_cupy_allocator)
stream = cupy.cuda.stream.Stream()
stream.use()
x = cupy.arange(10)
del stream
del x
@pytest.mark.parametrize("dtype", _dtypes)
@pytest.mark.parametrize("nelem", _nelems)
@pytest.mark.parametrize("alloc", _allocs)
def test_pool_memory_resource(dtype, nelem, alloc):
mr = rmm.mr.PoolMemoryResource(
rmm.mr.CudaMemoryResource(),
initial_pool_size=1 << 22,
maximum_pool_size=1 << 23,
)
rmm.mr.set_current_device_resource(mr)
assert rmm.mr.get_current_device_resource_type() is type(mr)
array_tester(dtype, nelem, alloc)
@pytest.mark.parametrize("dtype", _dtypes)
@pytest.mark.parametrize("nelem", _nelems)
@pytest.mark.parametrize("alloc", _allocs)
@pytest.mark.parametrize(
"upstream",
[
lambda: rmm.mr.CudaMemoryResource(),
lambda: rmm.mr.ManagedMemoryResource(),
],
)
def test_fixed_size_memory_resource(dtype, nelem, alloc, upstream):
mr = rmm.mr.FixedSizeMemoryResource(
upstream(), block_size=1 << 20, blocks_to_preallocate=128
)
rmm.mr.set_current_device_resource(mr)
assert rmm.mr.get_current_device_resource_type() is type(mr)
array_tester(dtype, nelem, alloc)
@pytest.mark.parametrize("dtype", _dtypes)
@pytest.mark.parametrize("nelem", _nelems)
@pytest.mark.parametrize("alloc", _allocs)
@pytest.mark.parametrize(
"upstream_mr",
[
lambda: rmm.mr.CudaMemoryResource(),
lambda: rmm.mr.ManagedMemoryResource(),
lambda: rmm.mr.PoolMemoryResource(
rmm.mr.CudaMemoryResource(), 1 << 20
),
],
)
def test_binning_memory_resource(dtype, nelem, alloc, upstream_mr):
upstream = upstream_mr()
# Add fixed-size bins 256KiB, 512KiB, 1MiB, 2MiB, 4MiB
mr = rmm.mr.BinningMemoryResource(upstream, 18, 22)
# Test adding some explicit bin MRs
fixed_mr = rmm.mr.FixedSizeMemoryResource(upstream, 1 << 10)
cuda_mr = rmm.mr.CudaMemoryResource()
mr.add_bin(1 << 10, fixed_mr) # 1KiB bin
mr.add_bin(1 << 23, cuda_mr) # 8MiB bin
rmm.mr.set_current_device_resource(mr)
assert rmm.mr.get_current_device_resource_type() is type(mr)
array_tester(dtype, nelem, alloc)
def test_reinitialize_max_pool_size():
rmm.reinitialize(
pool_allocator=True, initial_pool_size=0, maximum_pool_size=1 << 23
)
rmm.DeviceBuffer().resize((1 << 23) - 1)
def test_reinitialize_max_pool_size_exceeded():
rmm.reinitialize(
pool_allocator=True, initial_pool_size=0, maximum_pool_size=1 << 23
)
with pytest.raises(MemoryError):
rmm.DeviceBuffer().resize(1 << 24)
def test_reinitialize_initial_pool_size_gt_max():
with pytest.raises(RuntimeError) as e:
rmm.reinitialize(
pool_allocator=True,
initial_pool_size=1 << 11,
maximum_pool_size=1 << 10,
)
assert "Initial pool size exceeds the maximum pool size" in str(e.value)
@pytest.mark.parametrize("dtype", _dtypes)
@pytest.mark.parametrize("nelem", _nelems)
@pytest.mark.parametrize("alloc", _allocs)
def test_rmm_enable_disable_logging(dtype, nelem, alloc, tmpdir):
suffix = ".csv"
base_name = str(tmpdir.join("rmm_log.csv"))
rmm.enable_logging(log_file_name=base_name)
print(rmm.mr.get_per_device_resource(0))
array_tester(dtype, nelem, alloc)
rmm.mr._flush_logs()
# Need to open separately because the device ID is appended to filename
fname = base_name[: -len(suffix)] + ".dev0" + suffix
try:
with open(fname, "rb") as f:
csv = f.read()
assert csv.find(b"Time,Action,Pointer,Size,Stream") >= 0
finally:
os.remove(fname)
rmm.disable_logging()
def test_mr_devicebuffer_lifetime():
# Test ensures MR/Stream lifetime is longer than DeviceBuffer. Even if all
# references go out of scope
# It is necessary to verify that it also works when using an upstream :
# here a Pool MR with the current MR as upstream
rmm.mr.set_current_device_resource(
rmm.mr.PoolMemoryResource(rmm.mr.get_current_device_resource())
)
# Creates a new non-default stream
stream = rmm._cuda.stream.Stream()
# Allocate DeviceBuffer with Pool and Stream
a = rmm.DeviceBuffer(size=10, stream=stream)
# Change current MR. Will cause Pool to go out of scope
rmm.mr.set_current_device_resource(rmm.mr.CudaMemoryResource())
# Force collection to ensure objects are cleaned up
gc.collect()
# Delete a. Used to crash before. Pool MR should still be alive
del a
def test_mr_upstream_lifetime():
# Simple test to ensure upstream MRs are deallocated before downstream MR
cuda_mr = rmm.mr.CudaMemoryResource()
pool_mr = rmm.mr.PoolMemoryResource(cuda_mr)
# Delete cuda_mr first. Should be kept alive by pool_mr
del cuda_mr
del pool_mr
@pytest.mark.skipif(
not _CUDAMALLOC_ASYNC_SUPPORTED,
reason="cudaMallocAsync not supported",
)
@pytest.mark.parametrize("dtype", _dtypes)
@pytest.mark.parametrize("nelem", _nelems)
@pytest.mark.parametrize("alloc", _allocs)
def test_cuda_async_memory_resource(dtype, nelem, alloc):
mr = rmm.mr.CudaAsyncMemoryResource()
rmm.mr.set_current_device_resource(mr)
assert rmm.mr.get_current_device_resource_type() is type(mr)
array_tester(dtype, nelem, alloc)
@pytest.mark.skipif(
not _CUDAMALLOC_ASYNC_SUPPORTED,
reason="cudaMallocAsync not supported",
)
def test_cuda_async_memory_resource_ipc():
# TODO: We don't have a great way to check if IPC is supported in Python,
# without using the C++ function
# rmm::detail::async_alloc::is_export_handle_type_supported. We can't
# accurately test driver and runtime versions for this via Python because
# cuda-python always has the IPC handle enum defined (which normally
# requires a CUDA 11.3 runtime) and the cuda-compat package in Docker
# containers prevents us from assuming that the driver we see actually
# supports IPC handles even if its reported version is new enough (we may
# see a newer driver than what is present on the host). We can only know
# the expected behavior by checking the C++ function mentioned above, which
# is then a redundant check because the CudaAsyncMemoryResource constructor
# follows the same logic. Therefore, we cannot easily ensure this test
# passes in certain expected configurations -- we can only ensure that if
# it fails, it fails in a predictable way.
try:
mr = rmm.mr.CudaAsyncMemoryResource(enable_ipc=True)
except RuntimeError as e:
# CUDA 11.3 is required for IPC memory handle support
assert str(e).endswith(
"Requested IPC memory handle type not supported"
)
else:
rmm.mr.set_current_device_resource(mr)
assert rmm.mr.get_current_device_resource_type() is type(mr)
@pytest.mark.skipif(
not _CUDAMALLOC_ASYNC_SUPPORTED,
reason="cudaMallocAsync not supported",
)
@pytest.mark.parametrize("nelems", _nelems)
def test_cuda_async_memory_resource_stream(nelems):
# test that using CudaAsyncMemoryResource
# with a non-default stream works
mr = rmm.mr.CudaAsyncMemoryResource()
rmm.mr.set_current_device_resource(mr)
stream = rmm._cuda.stream.Stream()
expected = np.full(nelems, 5, dtype="u1")
dbuf = rmm.DeviceBuffer.to_device(expected, stream=stream)
result = np.asarray(dbuf.copy_to_host())
np.testing.assert_equal(expected, result)
@pytest.mark.skipif(
not _CUDAMALLOC_ASYNC_SUPPORTED,
reason="cudaMallocAsync not supported",
)
@pytest.mark.parametrize("nelem", _nelems)
@pytest.mark.parametrize("alloc", _allocs)
def test_cuda_async_memory_resource_threshold(nelem, alloc):
# initial pool size == 0
mr = rmm.mr.CudaAsyncMemoryResource(
initial_pool_size=0, release_threshold=nelem
)
rmm.mr.set_current_device_resource(mr)
array_tester("u1", nelem, alloc) # should not trigger release
array_tester("u1", 2 * nelem, alloc) # should trigger release
@pytest.mark.parametrize(
"mr",
[
rmm.mr.CudaMemoryResource,
pytest.param(
rmm.mr.CudaAsyncMemoryResource,
marks=pytest.mark.skipif(
not _CUDAMALLOC_ASYNC_SUPPORTED,
reason="cudaMallocAsync not supported",
),
),
],
)
def test_limiting_resource_adaptor(mr):
cuda_mr = mr()
allocation_limit = 1 << 20
num_buffers = 2
buffer_size = allocation_limit // num_buffers
mr = rmm.mr.LimitingResourceAdaptor(
cuda_mr, allocation_limit=allocation_limit
)
assert mr.get_allocation_limit() == allocation_limit
rmm.mr.set_current_device_resource(mr)
buffers = [rmm.DeviceBuffer(size=buffer_size) for _ in range(num_buffers)]
assert mr.get_allocated_bytes() == sum(b.size for b in buffers)
with pytest.raises(MemoryError):
rmm.DeviceBuffer(size=1)
def test_statistics_resource_adaptor(stats_mr):
buffers = [rmm.DeviceBuffer(size=1000) for _ in range(10)]
for i in range(9, 0, -2):
del buffers[i]
assert stats_mr.allocation_counts == {
"current_bytes": 5040,
"current_count": 5,
"peak_bytes": 10080,
"peak_count": 10,
"total_bytes": 10080,
"total_count": 10,
}
# Push a new Tracking adaptor
mr2 = rmm.mr.StatisticsResourceAdaptor(stats_mr)
rmm.mr.set_current_device_resource(mr2)
for _ in range(2):
buffers.append(rmm.DeviceBuffer(size=1000))
assert mr2.allocation_counts == {
"current_bytes": 2016,
"current_count": 2,
"peak_bytes": 2016,
"peak_count": 2,
"total_bytes": 2016,
"total_count": 2,
}
assert stats_mr.allocation_counts == {
"current_bytes": 7056,
"current_count": 7,
"peak_bytes": 10080,
"peak_count": 10,
"total_bytes": 12096,
"total_count": 12,
}
del buffers
gc.collect()
assert mr2.allocation_counts == {
"current_bytes": 0,
"current_count": 0,
"peak_bytes": 2016,
"peak_count": 2,
"total_bytes": 2016,
"total_count": 2,
}
assert stats_mr.allocation_counts == {
"current_bytes": 0,
"current_count": 0,
"peak_bytes": 10080,
"peak_count": 10,
"total_bytes": 12096,
"total_count": 12,
}
gc.collect()
def test_tracking_resource_adaptor():
cuda_mr = rmm.mr.CudaMemoryResource()
mr = rmm.mr.TrackingResourceAdaptor(cuda_mr, capture_stacks=True)
rmm.mr.set_current_device_resource(mr)
buffers = [rmm.DeviceBuffer(size=1000) for _ in range(10)]
for i in range(9, 0, -2):
del buffers[i]
assert mr.get_allocated_bytes() == 5040
# Push a new Tracking adaptor
mr2 = rmm.mr.TrackingResourceAdaptor(mr, capture_stacks=True)
rmm.mr.set_current_device_resource(mr2)
for _ in range(2):
buffers.append(rmm.DeviceBuffer(size=1000))
assert mr2.get_allocated_bytes() == 2016
assert mr.get_allocated_bytes() == 7056
# Ensure we get back a non-empty string for the allocations
assert len(mr.get_outstanding_allocations_str()) > 0
del buffers
gc.collect()
assert mr2.get_allocated_bytes() == 0
assert mr.get_allocated_bytes() == 0
# make sure the allocations string is now empty
assert len(mr2.get_outstanding_allocations_str()) == 0
assert len(mr.get_outstanding_allocations_str()) == 0
def test_failure_callback_resource_adaptor():
retried = [False]
def callback(nbytes: int) -> bool:
if retried[0]:
return False
else:
retried[0] = True
return True
cuda_mr = rmm.mr.CudaMemoryResource()
mr = rmm.mr.FailureCallbackResourceAdaptor(cuda_mr, callback)
rmm.mr.set_current_device_resource(mr)
with pytest.raises(MemoryError):
rmm.DeviceBuffer(size=int(1e11))
assert retried[0]
def test_failure_callback_resource_adaptor_error():
def callback(nbytes: int) -> bool:
raise RuntimeError("MyError")
cuda_mr = rmm.mr.CudaMemoryResource()
mr = rmm.mr.FailureCallbackResourceAdaptor(cuda_mr, callback)
rmm.mr.set_current_device_resource(mr)
with pytest.raises(RuntimeError, match="MyError"):
rmm.DeviceBuffer(size=int(1e11))
def test_dev_buf_circle_ref_dealloc():
# This test creates a reference cycle containing a `DeviceBuffer`
# and ensures that the garbage collector does not clear it, i.e.,
# that the GC does not remove all references to other Python
# objects from it. The `DeviceBuffer` needs to keep its reference
# to the `DeviceMemoryResource` that was used to create it in
# order to be cleaned up properly. See GH #931.
rmm.mr.set_current_device_resource(rmm.mr.CudaMemoryResource())
dbuf1 = rmm.DeviceBuffer(size=1_000_000)
# Make dbuf1 part of a reference cycle:
l1 = [dbuf1]
l1.append(l1)
# due to the reference cycle, the device buffer doesn't actually get
# cleaned up until after `gc.collect()` is called.
del dbuf1, l1
rmm.mr.set_current_device_resource(rmm.mr.CudaMemoryResource())
# test that after the call to `gc.collect()`, the `DeviceBuffer`
# is deallocated successfully (i.e., without a segfault).
gc.collect()
def test_upstream_mr_circle_ref_dealloc():
# This test is just like the one above, except it tests that
# instances of `UpstreamResourceAdaptor` (such as
# `PoolMemoryResource`) are not cleared by the GC.
rmm.mr.set_current_device_resource(rmm.mr.CudaMemoryResource())
mr = rmm.mr.PoolMemoryResource(rmm.mr.get_current_device_resource())
l1 = [mr]
l1.append(l1)
del mr, l1
rmm.mr.set_current_device_resource(rmm.mr.CudaMemoryResource())
gc.collect()
def test_mr_allocate_deallocate():
mr = rmm.mr.TrackingResourceAdaptor(rmm.mr.get_current_device_resource())
size = 1 << 23 # 8 MiB
ptr = mr.allocate(size)
assert mr.get_allocated_bytes() == 1 << 23
mr.deallocate(ptr, size)
assert mr.get_allocated_bytes() == 0
def test_custom_mr(capsys):
base_mr = rmm.mr.CudaMemoryResource()
def allocate_func(size):
print(f"Allocating {size} bytes")
return base_mr.allocate(size)
def deallocate_func(ptr, size):
print(f"Deallocating {size} bytes")
return base_mr.deallocate(ptr, size)
rmm.mr.set_current_device_resource(
rmm.mr.CallbackMemoryResource(allocate_func, deallocate_func)
)
rmm.DeviceBuffer(size=256)
captured = capsys.readouterr()
assert captured.out == "Allocating 256 bytes\nDeallocating 256 bytes\n"
@pytest.mark.parametrize(
"err_raise,err_catch",
[
(MemoryError, MemoryError),
(RuntimeError, RuntimeError),
(Exception, RuntimeError),
(BaseException, RuntimeError),
],
)
def test_callback_mr_error(err_raise, err_catch):
base_mr = rmm.mr.CudaMemoryResource()
def allocate_func(size):
raise err_raise("My alloc error")
def deallocate_func(ptr, size):
return base_mr.deallocate(ptr, size)
rmm.mr.set_current_device_resource(
rmm.mr.CallbackMemoryResource(allocate_func, deallocate_func)
)
with pytest.raises(err_catch, match="My alloc error"):
rmm.DeviceBuffer(size=256)
@pytest.fixture
def make_reinit_hook():
funcs = []
def _make_reinit_hook(func, *args, **kwargs):
funcs.append(func)
rmm.register_reinitialize_hook(func, *args, **kwargs)
return func
yield _make_reinit_hook
for func in funcs:
rmm.unregister_reinitialize_hook(func)
def test_reinit_hooks_register(make_reinit_hook):
L = []
make_reinit_hook(lambda: L.append(1))
make_reinit_hook(lambda: L.append(2))
make_reinit_hook(lambda x: L.append(x), 3)
rmm.reinitialize()
assert L == [3, 2, 1]
def test_reinit_hooks_unregister(make_reinit_hook):
L = []
one = make_reinit_hook(lambda: L.append(1))
make_reinit_hook(lambda: L.append(2))
rmm.unregister_reinitialize_hook(one)
rmm.reinitialize()
assert L == [2]
def test_reinit_hooks_register_twice(make_reinit_hook):
L = []
def func_with_arg(x):
L.append(x)
def func_without_arg():
L.append(2)
make_reinit_hook(func_with_arg, 1)
make_reinit_hook(func_without_arg)
make_reinit_hook(func_with_arg, 3)
make_reinit_hook(func_without_arg)
rmm.reinitialize()
assert L == [2, 3, 2, 1]
def test_reinit_hooks_unregister_twice_registered(make_reinit_hook):
# unregistering a twice-registered function
# should unregister both instances:
L = []
def func_with_arg(x):
L.append(x)
make_reinit_hook(func_with_arg, 1)
make_reinit_hook(lambda: L.append(2))
make_reinit_hook(func_with_arg, 3)
rmm.unregister_reinitialize_hook(func_with_arg)
rmm.reinitialize()
assert L == [2]
@pytest.mark.parametrize(
"cuda_ary",
[
lambda: rmm.DeviceBuffer.to_device(b"abc"),
lambda: cuda.to_device(np.array([97, 98, 99, 0, 0], dtype="u1")),
],
)
@pytest.mark.parametrize(
"make_copy", [lambda db: db.copy(), lambda db: copy.copy(db)]
)
def test_rmm_device_buffer_copy(cuda_ary, make_copy):
cuda_ary = cuda_ary()
db = rmm.DeviceBuffer.to_device(np.zeros(5, dtype="u1"))
db.copy_from_device(cuda_ary)
db_copy = make_copy(db)
assert db is not db_copy
assert db.ptr != db_copy.ptr
assert len(db) == len(db_copy)
expected = np.array([97, 98, 99, 0, 0], dtype="u1")
result = db_copy.copy_to_host()
np.testing.assert_equal(expected, result)
@pytest.mark.parametrize("level", rmm.logging_level)
def test_valid_logging_level(level):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="RMM will not log logging_level.TRACE."
)
warnings.filterwarnings(
"ignore", message="RMM will not log logging_level.DEBUG."
)
rmm.set_logging_level(level)
assert rmm.get_logging_level() == level
rmm.set_logging_level(rmm.logging_level.INFO) # reset to default
rmm.set_flush_level(level)
assert rmm.get_flush_level() == level
rmm.set_flush_level(rmm.logging_level.INFO) # reset to default
rmm.should_log(level)
@pytest.mark.parametrize(
"level", ["INFO", 3, "invalid", 100, None, 1.2345, [1, 2, 3]]
)
def test_invalid_logging_level(level):
with pytest.raises(TypeError):
rmm.set_logging_level(level)
with pytest.raises(TypeError):
rmm.set_flush_level(level)
with pytest.raises(TypeError):
rmm.should_log(level)
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/tests/conftest.py
|
import pytest
import rmm
@pytest.fixture(scope="function", autouse=True)
def rmm_auto_reinitialize():
# Run the test
yield
# Automatically reinitialize the current memory resource after running each
# test
rmm.reinitialize()
@pytest.fixture
def stats_mr():
mr = rmm.mr.StatisticsResourceAdaptor(rmm.mr.CudaMemoryResource())
rmm.mr.set_current_device_resource(mr)
return mr
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/tests/test_cython.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import importlib
import sys
def py_func(func):
"""
Wraps func in a plain Python function.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
cython_test_modules = ["rmm._lib.tests.test_device_buffer"]
for mod in cython_test_modules:
try:
# For each callable in `mod` with name `test_*`,
# wrap the callable in a plain Python function
# and set the result as an attribute of this module.
mod = importlib.import_module(mod)
for name in dir(mod):
item = getattr(mod, name)
if callable(item) and name.startswith("test_"):
item = py_func(item)
setattr(sys.modules[__name__], name, item)
except ImportError:
pass
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/cuda_stream.pxd
|
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cimport cython
from cuda.ccudart cimport cudaStream_t
from libcpp cimport bool
from libcpp.memory cimport unique_ptr
from rmm._lib.cuda_stream_view cimport cuda_stream_view
cdef extern from "rmm/cuda_stream.hpp" namespace "rmm" nogil:
cdef cppclass cuda_stream:
cuda_stream() except +
bool is_valid() except +
cudaStream_t value() except +
cuda_stream_view view() except +
void synchronize() except +
void synchronize_no_throw()
@cython.final
cdef class CudaStream:
cdef unique_ptr[cuda_stream] c_obj
cdef cudaStream_t value(self) except * nogil
cdef bool is_valid(self) except * nogil
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/device_uvector.pxd
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm._lib.device_buffer cimport device_buffer
from rmm._lib.memory_resource cimport device_memory_resource
cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil:
cdef cppclass device_uvector[T]:
device_uvector(size_t size, cuda_stream_view stream) except +
T* element_ptr(size_t index)
void set_element(size_t element_index, const T& v, cuda_stream_view s)
void set_element_async(
size_t element_index,
const T& v,
cuda_stream_view s
) except +
T front_element(cuda_stream_view s) except +
T back_element(cuda_stream_view s) except +
void reserve(size_t new_capacity, cuda_stream_view stream) except +
void resize(size_t new_size, cuda_stream_view stream) except +
void shrink_to_fit(cuda_stream_view stream) except +
device_buffer release()
size_t capacity()
T* data()
size_t size()
device_memory_resource* memory_resource()
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/lib.pxd
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libc.stdint cimport uintptr_t
from libcpp cimport bool
from libcpp.utility cimport pair
from libcpp.vector cimport vector
ctypedef pair[const char*, unsigned int] caller_pair
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/memory_resource.pxd
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libc.stdint cimport int8_t
from libcpp.memory cimport shared_ptr
from libcpp.string cimport string
from libcpp.vector cimport vector
from rmm._lib.cuda_stream_view cimport cuda_stream_view
cdef extern from "rmm/mr/device/device_memory_resource.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass device_memory_resource:
void* allocate(size_t bytes) except +
void* allocate(size_t bytes, cuda_stream_view stream) except +
void deallocate(void* ptr, size_t bytes) except +
void deallocate(
void* ptr,
size_t bytes,
cuda_stream_view stream
) except +
cdef class DeviceMemoryResource:
cdef shared_ptr[device_memory_resource] c_obj
cdef device_memory_resource* get_mr(self)
cdef class UpstreamResourceAdaptor(DeviceMemoryResource):
cdef readonly DeviceMemoryResource upstream_mr
cpdef DeviceMemoryResource get_upstream(self)
cdef class CudaMemoryResource(DeviceMemoryResource):
pass
cdef class ManagedMemoryResource(DeviceMemoryResource):
pass
cdef class CudaAsyncMemoryResource(DeviceMemoryResource):
pass
cdef class PoolMemoryResource(UpstreamResourceAdaptor):
pass
cdef class FixedSizeMemoryResource(UpstreamResourceAdaptor):
pass
cdef class BinningMemoryResource(UpstreamResourceAdaptor):
cdef readonly list _bin_mrs
cpdef add_bin(
self,
size_t allocation_size,
DeviceMemoryResource bin_resource=*)
cdef class CallbackMemoryResource(DeviceMemoryResource):
cdef object _allocate_func
cdef object _deallocate_func
cdef class LimitingResourceAdaptor(UpstreamResourceAdaptor):
pass
cdef class LoggingResourceAdaptor(UpstreamResourceAdaptor):
cdef object _log_file_name
cpdef get_file_name(self)
cpdef flush(self)
cdef class StatisticsResourceAdaptor(UpstreamResourceAdaptor):
pass
cdef class TrackingResourceAdaptor(UpstreamResourceAdaptor):
pass
cdef class FailureCallbackResourceAdaptor(UpstreamResourceAdaptor):
cdef object _callback
cpdef DeviceMemoryResource get_current_device_resource()
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
set(cython_sources device_buffer.pyx lib.pyx logger.pyx memory_resource.pyx cuda_stream.pyx
torch_allocator.pyx)
set(linked_libraries rmm::rmm)
# Build all of the Cython targets
rapids_cython_create_modules(SOURCE_FILES "${cython_sources}" LINKED_LIBRARIES "${linked_libraries}"
CXX)
# The cdef public functions in this file need to have a C ABI
target_compile_definitions(torch_allocator PRIVATE CYTHON_EXTERN_C=extern\ "C")
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/memory_resource.pyx
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
# This import is needed for Cython typing in translate_python_except_to_cpp
# See https://github.com/cython/cython/issues/5589
from builtins import BaseException
from collections import defaultdict
cimport cython
from cython.operator cimport dereference as deref
from libc.stddef cimport size_t
from libc.stdint cimport int8_t, int64_t, uintptr_t
from libcpp cimport bool
from libcpp.memory cimport make_unique, unique_ptr
from libcpp.pair cimport pair
from libcpp.string cimport string
from cuda.cudart import cudaError_t
from rmm._cuda.gpu import CUDARuntimeError, getDevice, setDevice
from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm._lib.per_device_resource cimport (
cuda_device_id,
set_per_device_resource as cpp_set_per_device_resource,
)
# Transparent handle of a C++ exception
ctypedef pair[int, string] CppExcept
cdef CppExcept translate_python_except_to_cpp(err: BaseException) noexcept:
"""Translate a Python exception into a C++ exception handle
The returned exception handle can then be thrown by `throw_cpp_except()`,
which MUST be done without holding the GIL.
This is useful when C++ calls a Python function and needs to catch or
propagate exceptions.
"""
if isinstance(err, MemoryError):
return CppExcept(0, str.encode(str(err)))
return CppExcept(-1, str.encode(str(err)))
# Implementation of `throw_cpp_except()`, which throws a given `CppExcept`.
# This function MUST be called without the GIL otherwise the thrown C++
# exception are translated back into a Python exception.
cdef extern from *:
"""
#include <stdexcept>
#include <utility>
void throw_cpp_except(std::pair<int, std::string> res) {
switch(res.first) {
case 0:
throw rmm::out_of_memory(res.second);
default:
throw std::runtime_error(res.second);
}
}
"""
void throw_cpp_except(CppExcept) nogil
# NOTE: Keep extern declarations in .pyx file as much as possible to avoid
# leaking dependencies when importing RMM Cython .pxd files
cdef extern from "thrust/optional.h" namespace "thrust" nogil:
struct nullopt_t:
pass
cdef nullopt_t nullopt
cdef cppclass optional[T]:
optional()
optional(T v)
cdef optional[T] make_optional[T](T v)
cdef extern from "rmm/mr/device/cuda_memory_resource.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass cuda_memory_resource(device_memory_resource):
cuda_memory_resource() except +
cdef extern from "rmm/mr/device/managed_memory_resource.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass managed_memory_resource(device_memory_resource):
managed_memory_resource() except +
cdef extern from "rmm/mr/device/cuda_async_memory_resource.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass cuda_async_memory_resource(device_memory_resource):
cuda_async_memory_resource(
optional[size_t] initial_pool_size,
optional[size_t] release_threshold,
optional[allocation_handle_type] export_handle_type) except +
# TODO: when we adopt Cython 3.0 use enum class
cdef extern from "rmm/mr/device/cuda_async_memory_resource.hpp" \
namespace \
"rmm::mr::cuda_async_memory_resource::allocation_handle_type" \
nogil:
enum allocation_handle_type \
"rmm::mr::cuda_async_memory_resource::allocation_handle_type":
none
posix_file_descriptor
win32
win32_kmt
cdef extern from "rmm/mr/device/pool_memory_resource.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass pool_memory_resource[Upstream](device_memory_resource):
pool_memory_resource(
Upstream* upstream_mr,
optional[size_t] initial_pool_size,
optional[size_t] maximum_pool_size) except +
size_t pool_size()
cdef extern from "rmm/mr/device/fixed_size_memory_resource.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass fixed_size_memory_resource[Upstream](device_memory_resource):
fixed_size_memory_resource(
Upstream* upstream_mr,
size_t block_size,
size_t block_to_preallocate) except +
cdef extern from "rmm/mr/device/callback_memory_resource.hpp" \
namespace "rmm::mr" nogil:
ctypedef void* (*allocate_callback_t)(size_t, void*)
ctypedef void (*deallocate_callback_t)(void*, size_t, void*)
cdef cppclass callback_memory_resource(device_memory_resource):
callback_memory_resource(
allocate_callback_t allocate_callback,
deallocate_callback_t deallocate_callback,
void* allocate_callback_arg,
void* deallocate_callback_arg
) except +
cdef extern from "rmm/mr/device/binning_memory_resource.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass binning_memory_resource[Upstream](device_memory_resource):
binning_memory_resource(Upstream* upstream_mr) except +
binning_memory_resource(
Upstream* upstream_mr,
int8_t min_size_exponent,
int8_t max_size_exponent) except +
void add_bin(size_t allocation_size) except +
void add_bin(
size_t allocation_size,
device_memory_resource* bin_resource) except +
cdef extern from "rmm/mr/device/limiting_resource_adaptor.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass limiting_resource_adaptor[Upstream](device_memory_resource):
limiting_resource_adaptor(
Upstream* upstream_mr,
size_t allocation_limit) except +
size_t get_allocated_bytes() except +
size_t get_allocation_limit() except +
cdef extern from "rmm/mr/device/logging_resource_adaptor.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass logging_resource_adaptor[Upstream](device_memory_resource):
logging_resource_adaptor(
Upstream* upstream_mr,
string filename) except +
void flush() except +
cdef extern from "rmm/mr/device/statistics_resource_adaptor.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass statistics_resource_adaptor[Upstream](
device_memory_resource):
struct counter:
counter()
int64_t value
int64_t peak
int64_t total
statistics_resource_adaptor(
Upstream* upstream_mr) except +
counter get_bytes_counter() except +
counter get_allocations_counter() except +
cdef extern from "rmm/mr/device/tracking_resource_adaptor.hpp" \
namespace "rmm::mr" nogil:
cdef cppclass tracking_resource_adaptor[Upstream](device_memory_resource):
tracking_resource_adaptor(
Upstream* upstream_mr,
bool capture_stacks) except +
size_t get_allocated_bytes() except +
string get_outstanding_allocations_str() except +
void log_outstanding_allocations() except +
cdef extern from "rmm/mr/device/failure_callback_resource_adaptor.hpp" \
namespace "rmm::mr" nogil:
ctypedef bool (*failure_callback_t)(size_t, void*)
cdef cppclass failure_callback_resource_adaptor[Upstream](
device_memory_resource
):
failure_callback_resource_adaptor(
Upstream* upstream_mr,
failure_callback_t callback,
void* callback_arg
) except +
cdef class DeviceMemoryResource:
cdef device_memory_resource* get_mr(self):
"""Get the underlying C++ memory resource object."""
return self.c_obj.get()
def allocate(self, size_t nbytes):
"""Allocate ``nbytes`` bytes of memory.
Parameters
----------
nbytes : size_t
The size of the allocation in bytes
"""
return <uintptr_t>self.c_obj.get().allocate(nbytes)
def deallocate(self, uintptr_t ptr, size_t nbytes):
"""Deallocate memory pointed to by ``ptr`` of size ``nbytes``.
Parameters
----------
ptr : uintptr_t
Pointer to be deallocated
nbytes : size_t
Size of the allocation in bytes
"""
self.c_obj.get().deallocate(<void*>(ptr), nbytes)
# See the note about `no_gc_clear` in `device_buffer.pyx`.
@cython.no_gc_clear
cdef class UpstreamResourceAdaptor(DeviceMemoryResource):
"""Parent class for all memory resources that track an upstream.
Upstream resource tracking requires maintaining a reference to the upstream
mr so that it is kept alive and may be accessed by any downstream resource
adaptors.
"""
def __cinit__(self, DeviceMemoryResource upstream_mr, *args, **kwargs):
if (upstream_mr is None):
raise Exception("Argument `upstream_mr` must not be None")
self.upstream_mr = upstream_mr
def __dealloc__(self):
# Must cleanup the base MR before any upstream MR
self.c_obj.reset()
cpdef DeviceMemoryResource get_upstream(self):
return self.upstream_mr
cdef class CudaMemoryResource(DeviceMemoryResource):
def __cinit__(self):
self.c_obj.reset(
new cuda_memory_resource()
)
def __init__(self):
"""
Memory resource that uses ``cudaMalloc``/``cudaFree`` for
allocation/deallocation.
"""
pass
cdef class CudaAsyncMemoryResource(DeviceMemoryResource):
"""
Memory resource that uses ``cudaMallocAsync``/``cudaFreeAsync`` for
allocation/deallocation.
Parameters
----------
initial_pool_size : int, optional
Initial pool size in bytes. By default, half the available memory
on the device is used.
release_threshold: int, optional
Release threshold in bytes. If the pool size grows beyond this
value, unused memory held by the pool will be released at the
next synchronization point.
enable_ipc: bool, optional
If True, enables export of POSIX file descriptor handles for the memory
allocated by this resource so that it can be used with CUDA IPC.
"""
def __cinit__(
self,
initial_pool_size=None,
release_threshold=None,
enable_ipc=False
):
cdef optional[size_t] c_initial_pool_size = (
optional[size_t]()
if initial_pool_size is None
else optional[size_t](initial_pool_size)
)
cdef optional[size_t] c_release_threshold = (
optional[size_t]()
if release_threshold is None
else optional[size_t](release_threshold)
)
# If IPC memory handles are not supported, the constructor below will
# raise an error from C++.
cdef optional[allocation_handle_type] c_export_handle_type = (
optional[allocation_handle_type](
posix_file_descriptor
)
if enable_ipc
else optional[allocation_handle_type]()
)
self.c_obj.reset(
new cuda_async_memory_resource(
c_initial_pool_size,
c_release_threshold,
c_export_handle_type
)
)
cdef class ManagedMemoryResource(DeviceMemoryResource):
def __cinit__(self):
self.c_obj.reset(
new managed_memory_resource()
)
def __init__(self):
"""
Memory resource that uses ``cudaMallocManaged``/``cudaFree`` for
allocation/deallocation.
"""
pass
cdef class PoolMemoryResource(UpstreamResourceAdaptor):
def __cinit__(
self,
DeviceMemoryResource upstream_mr,
initial_pool_size=None,
maximum_pool_size=None
):
cdef optional[size_t] c_initial_pool_size
cdef optional[size_t] c_maximum_pool_size
c_initial_pool_size = (
optional[size_t]() if
initial_pool_size is None
else make_optional[size_t](initial_pool_size)
)
c_maximum_pool_size = (
optional[size_t]() if
maximum_pool_size is None
else make_optional[size_t](maximum_pool_size)
)
self.c_obj.reset(
new pool_memory_resource[device_memory_resource](
upstream_mr.get_mr(),
c_initial_pool_size,
c_maximum_pool_size
)
)
def __init__(
self,
DeviceMemoryResource upstream_mr,
object initial_pool_size=None,
object maximum_pool_size=None
):
"""
Coalescing best-fit suballocator which uses a pool of memory allocated
from an upstream memory resource.
Parameters
----------
upstream_mr : DeviceMemoryResource
The DeviceMemoryResource from which to allocate blocks for the
pool.
initial_pool_size : int, optional
Initial pool size in bytes. By default, half the available memory
on the device is used.
maximum_pool_size : int, optional
Maximum size in bytes, that the pool can grow to.
"""
pass
def pool_size(self):
cdef pool_memory_resource[device_memory_resource]* c_mr = (
<pool_memory_resource[device_memory_resource]*>(self.get_mr())
)
return c_mr.pool_size()
cdef class FixedSizeMemoryResource(UpstreamResourceAdaptor):
def __cinit__(
self,
DeviceMemoryResource upstream_mr,
size_t block_size=1<<20,
size_t blocks_to_preallocate=128
):
self.c_obj.reset(
new fixed_size_memory_resource[device_memory_resource](
upstream_mr.get_mr(),
block_size,
blocks_to_preallocate
)
)
def __init__(
self,
DeviceMemoryResource upstream_mr,
size_t block_size=1<<20,
size_t blocks_to_preallocate=128
):
"""
Memory resource which allocates memory blocks of a single fixed size.
Parameters
----------
upstream_mr : DeviceMemoryResource
The DeviceMemoryResource from which to allocate blocks for the
pool.
block_size : int, optional
The size of blocks to allocate (default is 1MiB).
blocks_to_preallocate : int, optional
The number of blocks to allocate to initialize the pool.
Notes
-----
Supports only allocations of size smaller than the configured
block_size.
"""
pass
cdef class BinningMemoryResource(UpstreamResourceAdaptor):
def __cinit__(
self,
DeviceMemoryResource upstream_mr,
int8_t min_size_exponent=-1,
int8_t max_size_exponent=-1,
):
self._bin_mrs = []
if (min_size_exponent == -1 or max_size_exponent == -1):
self.c_obj.reset(
new binning_memory_resource[device_memory_resource](
upstream_mr.get_mr()
)
)
else:
self.c_obj.reset(
new binning_memory_resource[device_memory_resource](
upstream_mr.get_mr(),
min_size_exponent,
max_size_exponent
)
)
def __dealloc__(self):
# Must cleanup the base MR before any upstream or referenced Bins
self.c_obj.reset()
def __init__(
self,
DeviceMemoryResource upstream_mr,
int8_t min_size_exponent=-1,
int8_t max_size_exponent=-1,
):
"""
Allocates memory from a set of specified "bin" sizes based on a
specified allocation size.
If min_size_exponent and max_size_exponent are specified, initializes
with one or more FixedSizeMemoryResource bins in the range
``[2**min_size_exponent, 2**max_size_exponent]``.
Call :py:meth:`~.add_bin` to add additional bin allocators.
Parameters
----------
upstream_mr : DeviceMemoryResource
The memory resource to use for allocations larger than any of the
bins.
min_size_exponent : size_t
The base-2 exponent of the minimum size FixedSizeMemoryResource
bin to create.
max_size_exponent : size_t
The base-2 exponent of the maximum size FixedSizeMemoryResource
bin to create.
"""
pass
cpdef add_bin(
self,
size_t allocation_size,
DeviceMemoryResource bin_resource=None
):
"""
Adds a bin of the specified maximum allocation size to this memory
resource. If specified, uses bin_resource for allocation for this bin.
If not specified, creates and uses a FixedSizeMemoryResource for
allocation for this bin.
Allocations smaller than allocation_size and larger than the next
smaller bin size will use this fixed-size memory resource.
Parameters
----------
allocation_size : size_t
The maximum allocation size in bytes for the created bin
bin_resource : DeviceMemoryResource
The resource to use for this bin (optional)
"""
if bin_resource is None:
(<binning_memory_resource[device_memory_resource]*>(
self.c_obj.get()))[0].add_bin(allocation_size)
else:
# Save the ref to the new bin resource to ensure its lifetime
self._bin_mrs.append(bin_resource)
(<binning_memory_resource[device_memory_resource]*>(
self.c_obj.get()))[0].add_bin(
allocation_size,
bin_resource.get_mr())
@property
def bin_mrs(self) -> list:
"""Get the list of binned memory resources."""
return self._bin_mrs
cdef void* _allocate_callback_wrapper(
size_t nbytes,
cuda_stream_view stream,
void* ctx
# Note that this function is specifically designed to rethrow Python
# exceptions as C++ exceptions when called as a callback from C++, so it is
# noexcept from Cython's perspective.
) noexcept nogil:
cdef CppExcept err
with gil:
try:
return <void*><uintptr_t>((<object>ctx)(nbytes))
except BaseException as e:
err = translate_python_except_to_cpp(e)
throw_cpp_except(err)
cdef void _deallocate_callback_wrapper(
void* ptr,
size_t nbytes,
cuda_stream_view stream,
void* ctx
) except * with gil:
(<object>ctx)(<uintptr_t>(ptr), nbytes)
cdef class CallbackMemoryResource(DeviceMemoryResource):
"""
A memory resource that uses the user-provided callables to do
memory allocation and deallocation.
``CallbackMemoryResource`` should really only be used for
debugging memory issues, as there is a significant performance
penalty associated with using a Python function for each memory
allocation and deallocation.
Parameters
----------
allocate_func: callable
The allocation function must accept a single integer argument,
representing the number of bytes to allocate, and return an
integer representing the pointer to the allocated memory.
deallocate_func: callable
The deallocation function must accept two arguments, an integer
representing the pointer to the memory to free, and a second
integer representing the number of bytes to free.
Examples
--------
>>> import rmm
>>> base_mr = rmm.mr.CudaMemoryResource()
>>> def allocate_func(size):
... print(f"Allocating {size} bytes")
... return base_mr.allocate(size)
...
>>> def deallocate_func(ptr, size):
... print(f"Deallocating {size} bytes")
... return base_mr.deallocate(ptr, size)
...
>>> rmm.mr.set_current_device_resource(
rmm.mr.CallbackMemoryResource(allocate_func, deallocate_func)
)
>>> dbuf = rmm.DeviceBuffer(size=256)
Allocating 256 bytes
>>> del dbuf
Deallocating 256 bytes
"""
def __init__(
self,
allocate_func,
deallocate_func,
):
self._allocate_func = allocate_func
self._deallocate_func = deallocate_func
self.c_obj.reset(
new callback_memory_resource(
<allocate_callback_t>(_allocate_callback_wrapper),
<deallocate_callback_t>(_deallocate_callback_wrapper),
<void*>(allocate_func),
<void*>(deallocate_func)
)
)
def _append_id(filename, id):
"""
Append ".dev<ID>" onto a filename before the extension
Example: _append_id("hello.txt", 1) returns "hello.dev1.txt"
Parameters
----------
filename : string
The filename, possibly with extension
id : int
The ID to append
"""
name, ext = os.path.splitext(filename)
return f"{name}.dev{id}{ext}"
cdef class LimitingResourceAdaptor(UpstreamResourceAdaptor):
def __cinit__(
self,
DeviceMemoryResource upstream_mr,
size_t allocation_limit
):
self.c_obj.reset(
new limiting_resource_adaptor[device_memory_resource](
upstream_mr.get_mr(),
allocation_limit
)
)
def __init__(
self,
DeviceMemoryResource upstream_mr,
size_t allocation_limit
):
"""
Memory resource that limits the total allocation amount possible
performed by an upstream memory resource.
Parameters
----------
upstream_mr : DeviceMemoryResource
The upstream memory resource.
allocation_limit : size_t
Maximum memory allowed for this allocator.
"""
pass
def get_allocated_bytes(self) -> size_t:
"""
Query the number of bytes that have been allocated. Note that this can
not be used to know how large of an allocation is possible due to both
possible fragmentation and also internal page sizes and alignment that
is not tracked by this allocator.
"""
return (<limiting_resource_adaptor[device_memory_resource]*>(
self.c_obj.get())
)[0].get_allocated_bytes()
def get_allocation_limit(self) -> size_t:
"""
Query the maximum number of bytes that this allocator is allowed to
allocate. This is the limit on the allocator and not a representation
of the underlying device. The device may not be able to support this
limit.
"""
return (<limiting_resource_adaptor[device_memory_resource]*>(
self.c_obj.get())
)[0].get_allocation_limit()
cdef class LoggingResourceAdaptor(UpstreamResourceAdaptor):
def __cinit__(
self,
DeviceMemoryResource upstream_mr,
object log_file_name=None
):
if log_file_name is None:
log_file_name = os.getenv("RMM_LOG_FILE")
if not log_file_name:
raise ValueError(
"RMM log file must be specified either using "
"log_file_name= argument or RMM_LOG_FILE "
"environment variable"
)
# Append the device ID before the file extension
log_file_name = _append_id(
log_file_name, getDevice()
)
log_file_name = os.path.abspath(log_file_name)
self._log_file_name = log_file_name
self.c_obj.reset(
new logging_resource_adaptor[device_memory_resource](
upstream_mr.get_mr(),
log_file_name.encode()
)
)
def __init__(
self,
DeviceMemoryResource upstream_mr,
object log_file_name=None
):
"""
Memory resource that logs information about allocations/deallocations
performed by an upstream memory resource.
Parameters
----------
upstream : DeviceMemoryResource
The upstream memory resource.
log_file_name : str
Path to the file to which logs are written.
"""
pass
cpdef flush(self):
(<logging_resource_adaptor[device_memory_resource]*>(
self.get_mr()))[0].flush()
cpdef get_file_name(self):
return self._log_file_name
def __dealloc__(self):
self.c_obj.reset()
cdef class StatisticsResourceAdaptor(UpstreamResourceAdaptor):
def __cinit__(
self,
DeviceMemoryResource upstream_mr
):
self.c_obj.reset(
new statistics_resource_adaptor[device_memory_resource](
upstream_mr.get_mr()
)
)
def __init__(
self,
DeviceMemoryResource upstream_mr
):
"""
Memory resource that tracks the current, peak and total
allocations/deallocations performed by an upstream memory resource.
Includes the ability to query these statistics at any time.
Parameters
----------
upstream : DeviceMemoryResource
The upstream memory resource.
"""
pass
@property
def allocation_counts(self) -> dict:
"""
Gets the current, peak, and total allocated bytes and number of
allocations.
The dictionary keys are ``current_bytes``, ``current_count``,
``peak_bytes``, ``peak_count``, ``total_bytes``, and ``total_count``.
Returns:
dict: Dictionary containing allocation counts and bytes.
"""
counts = (<statistics_resource_adaptor[device_memory_resource]*>(
self.c_obj.get()))[0].get_allocations_counter()
byte_counts = (<statistics_resource_adaptor[device_memory_resource]*>(
self.c_obj.get()))[0].get_bytes_counter()
return {
"current_bytes": byte_counts.value,
"current_count": counts.value,
"peak_bytes": byte_counts.peak,
"peak_count": counts.peak,
"total_bytes": byte_counts.total,
"total_count": counts.total,
}
cdef class TrackingResourceAdaptor(UpstreamResourceAdaptor):
def __cinit__(
self,
DeviceMemoryResource upstream_mr,
bool capture_stacks=False
):
self.c_obj.reset(
new tracking_resource_adaptor[device_memory_resource](
upstream_mr.get_mr(),
capture_stacks
)
)
def __init__(
self,
DeviceMemoryResource upstream_mr,
bool capture_stacks=False
):
"""
Memory resource that logs tracks allocations/deallocations
performed by an upstream memory resource. Includes the ability to
query all outstanding allocations with the stack trace, if desired.
Parameters
----------
upstream : DeviceMemoryResource
The upstream memory resource.
capture_stacks : bool
Whether or not to capture the stack trace with each allocation.
"""
pass
def get_allocated_bytes(self) -> size_t:
"""
Query the number of bytes that have been allocated. Note that this can
not be used to know how large of an allocation is possible due to both
possible fragmentation and also internal page sizes and alignment that
is not tracked by this allocator.
"""
return (<tracking_resource_adaptor[device_memory_resource]*>(
self.c_obj.get())
)[0].get_allocated_bytes()
def get_outstanding_allocations_str(self) -> str:
"""
Returns a string containing information about the current outstanding
allocations. For each allocation, the address, size and optional
stack trace are shown.
"""
return (<tracking_resource_adaptor[device_memory_resource]*>(
self.c_obj.get())
)[0].get_outstanding_allocations_str().decode('UTF-8')
def log_outstanding_allocations(self):
"""
Logs the output of `get_outstanding_allocations_str` to the current
RMM log file if enabled.
"""
(<tracking_resource_adaptor[device_memory_resource]*>(
self.c_obj.get()))[0].log_outstanding_allocations()
# Note that this function is specifically designed to rethrow Python exceptions
# as C++ exceptions when called as a callback from C++, so it is noexcept from
# Cython's perspective.
cdef bool _oom_callback_function(size_t bytes, void *callback_arg) noexcept nogil:
cdef CppExcept err
with gil:
try:
return (<object>callback_arg)(bytes)
except BaseException as e:
err = translate_python_except_to_cpp(e)
throw_cpp_except(err)
cdef class FailureCallbackResourceAdaptor(UpstreamResourceAdaptor):
def __cinit__(
self,
DeviceMemoryResource upstream_mr,
object callback,
):
self._callback = callback
self.c_obj.reset(
new failure_callback_resource_adaptor[device_memory_resource](
upstream_mr.get_mr(),
<failure_callback_t>_oom_callback_function,
<void*>callback
)
)
def __init__(
self,
DeviceMemoryResource upstream_mr,
object callback,
):
"""
Memory resource that call callback when memory allocation fails.
Parameters
----------
upstream : DeviceMemoryResource
The upstream memory resource.
callback : callable
Function called when memory allocation fails.
"""
pass
# Global per-device memory resources; dict of int:DeviceMemoryResource
cdef _per_device_mrs = defaultdict(CudaMemoryResource)
cpdef void _initialize(
bool pool_allocator=False,
bool managed_memory=False,
object initial_pool_size=None,
object maximum_pool_size=None,
object devices=0,
bool logging=False,
object log_file_name=None,
) except *:
"""
Initializes RMM library using the options passed
"""
if managed_memory:
upstream = ManagedMemoryResource
else:
upstream = CudaMemoryResource
if pool_allocator:
typ = PoolMemoryResource
args = (upstream(),)
kwargs = dict(
initial_pool_size=initial_pool_size,
maximum_pool_size=maximum_pool_size
)
else:
typ = upstream
args = ()
kwargs = {}
cdef DeviceMemoryResource mr
cdef int original_device
# Save the current device so we can reset it
try:
original_device = getDevice()
except CUDARuntimeError as e:
if e.status == cudaError_t.cudaErrorNoDevice:
warnings.warn(e.msg)
else:
raise e
else:
# reset any previously specified per device resources
global _per_device_mrs
_per_device_mrs.clear()
if devices is None:
devices = [0]
elif isinstance(devices, int):
devices = [devices]
# create a memory resource per specified device
for device in devices:
setDevice(device)
if logging:
mr = LoggingResourceAdaptor(
typ(*args, **kwargs),
log_file_name
)
else:
mr = typ(*args, **kwargs)
set_per_device_resource(device, mr)
# reset CUDA device to original
setDevice(original_device)
cpdef get_per_device_resource(int device):
"""
Get the default memory resource for the specified device.
If the returned memory resource is used when a different device is the
active CUDA device, behavior is undefined.
Parameters
----------
device : int
The ID of the device for which to get the memory resource.
"""
global _per_device_mrs
return _per_device_mrs[device]
cpdef set_per_device_resource(int device, DeviceMemoryResource mr):
"""
Set the default memory resource for the specified device.
Parameters
----------
device : int
The ID of the device for which to get the memory resource.
mr : DeviceMemoryResource
The memory resource to set. Must have been created while device was
the active CUDA device.
"""
global _per_device_mrs
_per_device_mrs[device] = mr
# Since cuda_device_id does not have a default constructor, it must be heap
# allocated
cdef unique_ptr[cuda_device_id] device_id = \
make_unique[cuda_device_id](device)
cpp_set_per_device_resource(deref(device_id), mr.get_mr())
cpdef set_current_device_resource(DeviceMemoryResource mr):
"""
Set the default memory resource for the current device.
Parameters
----------
mr : DeviceMemoryResource
The memory resource to set. Must have been created while the current
device is the active CUDA device.
"""
set_per_device_resource(getDevice(), mr)
cpdef get_per_device_resource_type(int device):
"""
Get the memory resource type used for RMM device allocations on the
specified device.
Parameters
----------
device : int
The device ID
"""
return type(get_per_device_resource(device))
cpdef DeviceMemoryResource get_current_device_resource():
"""
Get the memory resource used for RMM device allocations on the current
device.
If the returned memory resource is used when a different device is the
active CUDA device, behavior is undefined.
"""
return get_per_device_resource(getDevice())
cpdef get_current_device_resource_type():
"""
Get the memory resource type used for RMM device allocations on the
current device.
"""
return type(get_current_device_resource())
cpdef is_initialized():
"""
Check whether RMM is initialized
"""
global _per_device_mrs
cdef DeviceMemoryResource each_mr
return all(
[each_mr.get_mr() is not NULL
for each_mr in _per_device_mrs.values()]
)
cpdef _flush_logs():
"""
Flush the logs of all currently initialized LoggingResourceAdaptor
memory resources
"""
global _per_device_mrs
cdef DeviceMemoryResource each_mr
for each_mr in _per_device_mrs.values():
if isinstance(each_mr, LoggingResourceAdaptor):
each_mr.flush()
def enable_logging(log_file_name=None):
"""
Enable logging of run-time events for all devices.
Parameters
----------
log_file_name: str, optional
Name of the log file. If not specified, the environment variable
RMM_LOG_FILE is used. A ValueError is thrown if neither is available.
A separate log file is produced for each device,
and the suffix `".dev{id}"` is automatically added to the log file
name.
Notes
-----
Note that if you use the environment variable CUDA_VISIBLE_DEVICES
with logging enabled, the suffix may not be what you expect. For
example, if you set CUDA_VISIBLE_DEVICES=1, the log file produced
will still have suffix `0`. Similarly, if you set
CUDA_VISIBLE_DEVICES=1,0 and use devices 0 and 1, the log file
with suffix `0` will correspond to the GPU with device ID `1`.
Use `rmm.get_log_filenames()` to get the log file names
corresponding to each device.
"""
global _per_device_mrs
devices = [0] if not _per_device_mrs.keys() else _per_device_mrs.keys()
for device in devices:
each_mr = <DeviceMemoryResource>_per_device_mrs[device]
if not isinstance(each_mr, LoggingResourceAdaptor):
set_per_device_resource(
device,
LoggingResourceAdaptor(each_mr, log_file_name)
)
def disable_logging():
"""
Disable logging if it was enabled previously using
`rmm.initialize()` or `rmm.enable_logging()`.
"""
global _per_device_mrs
for i, each_mr in _per_device_mrs.items():
if isinstance(each_mr, LoggingResourceAdaptor):
set_per_device_resource(i, each_mr.get_upstream())
def get_log_filenames():
"""
Returns the log filename (or `None` if not writing logs)
for each device in use.
Examples
--------
>>> import rmm
>>> rmm.reinitialize(devices=[0, 1], logging=True, log_file_name="rmm.log")
>>> rmm.get_log_filenames()
{0: '/home/user/workspace/rapids/rmm/python/rmm.dev0.log',
1: '/home/user/workspace/rapids/rmm/python/rmm.dev1.log'}
"""
global _per_device_mrs
return {
i: each_mr.get_file_name()
if isinstance(each_mr, LoggingResourceAdaptor)
else None
for i, each_mr in _per_device_mrs.items()
}
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/lib.pyx
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/device_buffer.pxd
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libc.stdint cimport uintptr_t
from libcpp.memory cimport unique_ptr
from rmm._cuda.stream cimport Stream
from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm._lib.memory_resource cimport DeviceMemoryResource
cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil:
cdef cppclass device_buffer:
device_buffer()
device_buffer(size_t size, cuda_stream_view stream) except +
device_buffer(const void* source_data,
size_t size, cuda_stream_view stream) except +
device_buffer(const device_buffer buf,
cuda_stream_view stream) except +
void reserve(size_t new_capacity, cuda_stream_view stream) except +
void resize(size_t new_size, cuda_stream_view stream) except +
void shrink_to_fit(cuda_stream_view stream) except +
void* data()
size_t size()
size_t capacity()
cdef class DeviceBuffer:
cdef unique_ptr[device_buffer] c_obj
# Holds a reference to the DeviceMemoryResource used for allocation.
# Ensures the MR does not get destroyed before this DeviceBuffer. `mr` is
# needed for deallocation
cdef DeviceMemoryResource mr
# Holds a reference to the stream used by the underlying `device_buffer`.
# Ensures the stream does not get destroyed before this DeviceBuffer
cdef Stream stream
@staticmethod
cdef DeviceBuffer c_from_unique_ptr(
unique_ptr[device_buffer] ptr,
Stream stream=*
)
@staticmethod
cdef DeviceBuffer c_to_device(const unsigned char[::1] b,
Stream stream=*) except *
cpdef copy_to_host(self, ary=*, Stream stream=*)
cpdef copy_from_host(self, ary, Stream stream=*)
cpdef copy_from_device(self, cuda_ary, Stream stream=*)
cpdef bytes tobytes(self, Stream stream=*)
cdef size_t c_size(self) except *
cpdef void reserve(self, size_t new_capacity, Stream stream=*) except *
cpdef void resize(self, size_t new_size, Stream stream=*) except *
cpdef size_t capacity(self) except *
cdef void* c_data(self) except *
cdef device_buffer c_release(self) except *
cpdef DeviceBuffer to_device(const unsigned char[::1] b,
Stream stream=*)
cpdef void copy_ptr_to_host(uintptr_t db,
unsigned char[::1] hb,
Stream stream=*) except *
cpdef void copy_host_to_ptr(const unsigned char[::1] hb,
uintptr_t db,
Stream stream=*) except *
cpdef void copy_device_to_ptr(uintptr_t d_src,
uintptr_t d_dst,
size_t count,
Stream stream=*) except *
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/cuda_stream.pyx
|
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cimport cython
from cuda.ccudart cimport cudaStream_t
from libcpp cimport bool
@cython.final
cdef class CudaStream:
"""
Wrapper around a CUDA stream with RAII semantics.
When a CudaStream instance is GC'd, the underlying
CUDA stream is destroyed.
"""
def __cinit__(self):
self.c_obj.reset(new cuda_stream())
cdef cudaStream_t value(self) except * nogil:
return self.c_obj.get()[0].value()
cdef bool is_valid(self) except * nogil:
return self.c_obj.get()[0].is_valid()
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/cuda_stream_pool.pxd
|
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cimport cython
from rmm._lib.cuda_stream_view cimport cuda_stream_view
cdef extern from "rmm/cuda_stream_pool.hpp" namespace "rmm" nogil:
cdef cppclass cuda_stream_pool:
cuda_stream_pool(size_t pool_size)
cuda_stream_view get_stream()
cuda_stream_view get_stream(size_t stream_id) except +
size_t get_pool_size()
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/torch_allocator.pyx
|
from cuda.ccudart cimport cudaStream_t
from rmm._lib.cuda_stream_view cimport cuda_stream_view
from rmm._lib.memory_resource cimport device_memory_resource
from rmm._lib.per_device_resource cimport get_current_device_resource
cdef public void* allocate(
ssize_t size, int device, void* stream
) except * with gil:
cdef device_memory_resource* mr = get_current_device_resource()
cdef cuda_stream_view stream_view = cuda_stream_view(
<cudaStream_t>(stream)
)
return mr[0].allocate(size, stream_view)
cdef public void deallocate(
void* ptr, ssize_t size, void* stream
) except * with gil:
cdef device_memory_resource* mr = get_current_device_resource()
cdef cuda_stream_view stream_view = cuda_stream_view(
<cudaStream_t>(stream)
)
mr[0].deallocate(ptr, size, stream_view)
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/per_device_resource.pxd
|
from rmm._lib.memory_resource cimport device_memory_resource
cdef extern from "rmm/mr/device/per_device_resource.hpp" namespace "rmm" nogil:
cdef cppclass cuda_device_id:
ctypedef int value_type
cuda_device_id(value_type id)
value_type value()
cdef extern from "rmm/mr/device/per_device_resource.hpp" \
namespace "rmm::mr" nogil:
cdef device_memory_resource* set_current_device_resource(
device_memory_resource* new_mr
)
cdef device_memory_resource* get_current_device_resource()
cdef device_memory_resource* set_per_device_resource(
cuda_device_id id, device_memory_resource* new_mr
)
cdef device_memory_resource* get_per_device_resource (
cuda_device_id id
)
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/cuda_stream_view.pxd
|
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cuda.ccudart cimport cudaStream_t
from libcpp cimport bool
cdef extern from "rmm/cuda_stream_view.hpp" namespace "rmm" nogil:
cdef cppclass cuda_stream_view:
cuda_stream_view()
cuda_stream_view(cudaStream_t)
cudaStream_t value()
bool is_default()
bool is_per_thread_default()
void synchronize() except +
cdef bool operator==(cuda_stream_view const, cuda_stream_view const)
const cuda_stream_view cuda_stream_default
const cuda_stream_view cuda_stream_legacy
const cuda_stream_view cuda_stream_per_thread
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/__init__.pxd
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/__init__.py
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .device_buffer import DeviceBuffer
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/device_buffer.pyx
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
cimport cython
from cpython.bytes cimport PyBytes_AS_STRING, PyBytes_FromStringAndSize
from libc.stdint cimport uintptr_t
from libcpp.memory cimport unique_ptr
from libcpp.utility cimport move
from rmm._cuda.stream cimport Stream
from rmm._cuda.stream import DEFAULT_STREAM
cimport cuda.ccudart as ccudart
from cuda.ccudart cimport (
cudaError,
cudaError_t,
cudaMemcpyAsync,
cudaMemcpyKind,
cudaStream_t,
)
from rmm._lib.memory_resource cimport get_current_device_resource
# The DeviceMemoryResource attribute could be released prematurely
# by the gc if the DeviceBuffer is in a reference cycle. Removing
# the tp_clear function with the no_gc_clear decoration prevents that.
# See https://github.com/rapidsai/rmm/pull/931 for details.
@cython.no_gc_clear
cdef class DeviceBuffer:
def __cinit__(self, *,
uintptr_t ptr=0,
size_t size=0,
Stream stream=DEFAULT_STREAM):
"""Construct a ``DeviceBuffer`` with optional size and data pointer
Parameters
----------
ptr : int
pointer to some data on host or device to copy over
size : int
size of the buffer to allocate
(and possibly size of data to copy)
stream : optional
CUDA stream to use for construction and/or copying,
defaults to the CUDA default stream. A reference to the
stream is stored internally to ensure it doesn't go out of
scope while the DeviceBuffer is in use. Destroying the
underlying stream while the DeviceBuffer is in use will
result in undefined behavior.
Note
----
If the pointer passed is non-null and ``stream`` is the default stream,
it is synchronized after the copy. However if a non-default ``stream``
is provided, this function is fully asynchronous.
Examples
--------
>>> import rmm
>>> db = rmm.DeviceBuffer(size=5)
"""
cdef const void* c_ptr
with nogil:
c_ptr = <const void*>ptr
if size == 0:
self.c_obj.reset(new device_buffer())
elif c_ptr == NULL:
self.c_obj.reset(new device_buffer(size, stream.view()))
else:
self.c_obj.reset(new device_buffer(c_ptr, size, stream.view()))
if stream.c_is_default():
stream.c_synchronize()
# Save a reference to the MR and stream used for allocation
self.mr = get_current_device_resource()
self.stream = stream
def __len__(self):
return self.size
def __sizeof__(self):
return self.size
def __bytes__(self):
return self.tobytes()
@property
def nbytes(self):
"""Gets the size of the buffer in bytes."""
return self.size
@property
def ptr(self):
"""Gets a pointer to the underlying data."""
return int(<uintptr_t>self.c_data())
@property
def size(self):
"""Gets the size of the buffer in bytes."""
return int(self.c_size())
def __reduce__(self):
return to_device, (self.copy_to_host(),)
@property
def __cuda_array_interface__(self):
cdef dict intf = {
"data": (self.ptr, False),
"shape": (self.size,),
"strides": None,
"typestr": "|u1",
"version": 0
}
return intf
def copy(self):
"""Returns a copy of DeviceBuffer.
Returns
-------
A deep copy of existing ``DeviceBuffer``
Examples
--------
>>> import rmm
>>> db = rmm.DeviceBuffer.to_device(b"abc")
>>> db_copy = db.copy()
>>> db.copy_to_host()
array([97, 98, 99], dtype=uint8)
>>> db_copy.copy_to_host()
array([97, 98, 99], dtype=uint8)
>>> assert db is not db_copy
>>> assert db.ptr != db_copy.ptr
"""
ret = DeviceBuffer(ptr=self.ptr, size=self.size, stream=self.stream)
ret.mr = self.mr
return ret
def __copy__(self):
return self.copy()
@staticmethod
cdef DeviceBuffer c_from_unique_ptr(
unique_ptr[device_buffer] ptr,
Stream stream=DEFAULT_STREAM
):
cdef DeviceBuffer buf = DeviceBuffer.__new__(DeviceBuffer)
if stream.c_is_default():
stream.c_synchronize()
buf.c_obj = move(ptr)
buf.mr = get_current_device_resource()
buf.stream = stream
return buf
@staticmethod
cdef DeviceBuffer c_to_device(const unsigned char[::1] b,
Stream stream=DEFAULT_STREAM) except *:
"""Calls ``to_device`` function on arguments provided"""
return to_device(b, stream)
@staticmethod
def to_device(const unsigned char[::1] b,
Stream stream=DEFAULT_STREAM):
"""Calls ``to_device`` function on arguments provided."""
return to_device(b, stream)
cpdef copy_to_host(self, ary=None, Stream stream=DEFAULT_STREAM):
"""Copy from a ``DeviceBuffer`` to a buffer on host.
Parameters
----------
ary : ``bytes``-like buffer to write into
stream : CUDA stream to use for copying, default the default stream
Examples
--------
>>> import rmm
>>> db = rmm.DeviceBuffer.to_device(b"abc")
>>> hb = bytearray(db.nbytes)
>>> db.copy_to_host(hb)
>>> print(hb)
bytearray(b'abc')
>>> hb = db.copy_to_host()
>>> print(hb)
bytearray(b'abc')
"""
cdef const device_buffer* dbp = self.c_obj.get()
cdef size_t s = dbp.size()
cdef unsigned char[::1] hb = ary
if hb is None:
# NumPy leverages huge pages under-the-hood,
# which speeds up the copy from device to host.
hb = ary = np.empty((s,), dtype="u1")
elif len(hb) < s:
raise ValueError(
"Argument `ary` is too small. Need space for %i bytes." % s
)
copy_ptr_to_host(<uintptr_t>dbp.data(), hb[:s], stream)
return ary
cpdef copy_from_host(self, ary, Stream stream=DEFAULT_STREAM):
"""Copy from a buffer on host to ``self``
Parameters
----------
ary : ``bytes``-like buffer to copy from
stream : CUDA stream to use for copying, default the default stream
Examples
--------
>>> import rmm
>>> db = rmm.DeviceBuffer(size=10)
>>> hb = b"abcdef"
>>> db.copy_from_host(hb)
>>> hb = db.copy_to_host()
>>> print(hb)
array([97, 98, 99, 0, 0, 0, 0, 0, 0, 0], dtype=uint8)
"""
cdef device_buffer* dbp = self.c_obj.get()
cdef const unsigned char[::1] hb = ary
cdef size_t s = len(hb)
if s > self.size:
raise ValueError(
"Argument `ary` is too large. Need space for %i bytes." % s
)
copy_host_to_ptr(hb[:s], <uintptr_t>dbp.data(), stream)
cpdef copy_from_device(self, cuda_ary,
Stream stream=DEFAULT_STREAM):
"""Copy from a buffer on host to ``self``
Parameters
----------
cuda_ary : object to copy from that has ``__cuda_array_interface__``
stream : CUDA stream to use for copying, default the default stream
Examples
--------
>>> import rmm
>>> db = rmm.DeviceBuffer(size=5)
>>> db2 = rmm.DeviceBuffer.to_device(b"abc")
>>> db.copy_from_device(db2)
>>> hb = db.copy_to_host()
>>> print(hb)
array([97, 98, 99, 0, 0], dtype=uint8)
"""
if not hasattr(cuda_ary, "__cuda_array_interface__"):
raise ValueError(
"Expected object to support `__cuda_array_interface__` "
"protocol"
)
cuda_ary_interface = cuda_ary.__cuda_array_interface__
shape = cuda_ary_interface["shape"]
strides = cuda_ary_interface.get("strides")
dtype = np.dtype(cuda_ary_interface["typestr"])
if len(shape) > 1:
raise ValueError(
"Only 1-D contiguous arrays are supported, got {}-D "
"array".format(str(len(shape)))
)
if strides is not None:
if strides[0] != dtype.itemsize:
raise ValueError(
"Only 1-D contiguous arrays are supported, got a "
"non-contiguous array"
)
cdef uintptr_t src_ptr = cuda_ary_interface["data"][0]
cdef size_t s = shape[0] * dtype.itemsize
if s > self.size:
raise ValueError(
"Argument `hb` is too large. Need space for %i bytes." % s
)
cdef device_buffer* dbp = self.c_obj.get()
copy_device_to_ptr(
<uintptr_t>src_ptr,
<uintptr_t>dbp.data(),
s,
stream
)
cpdef bytes tobytes(self, Stream stream=DEFAULT_STREAM):
cdef const device_buffer* dbp = self.c_obj.get()
cdef size_t s = dbp.size()
cdef bytes b = PyBytes_FromStringAndSize(NULL, s)
cdef unsigned char* p = <unsigned char*>PyBytes_AS_STRING(b)
cdef unsigned char[::1] mv = (<unsigned char[:(s + 1):1]>p)[:s]
self.copy_to_host(mv, stream)
return b
cdef size_t c_size(self) except *:
return self.c_obj.get()[0].size()
cpdef void reserve(self,
size_t new_capacity,
Stream stream=DEFAULT_STREAM) except *:
self.c_obj.get()[0].reserve(new_capacity, stream.view())
cpdef void resize(self,
size_t new_size,
Stream stream=DEFAULT_STREAM) except *:
self.c_obj.get()[0].resize(new_size, stream.view())
cpdef size_t capacity(self) except *:
return self.c_obj.get()[0].capacity()
cdef void* c_data(self) except *:
return self.c_obj.get()[0].data()
cdef device_buffer c_release(self) except *:
"""
Releases ownership of the data held by this DeviceBuffer.
"""
return move(cython.operator.dereference(self.c_obj))
@cython.boundscheck(False)
cpdef DeviceBuffer to_device(const unsigned char[::1] b,
Stream stream=DEFAULT_STREAM):
"""Return a new ``DeviceBuffer`` with a copy of the data.
Parameters
----------
b : ``bytes``-like data on host to copy to device
stream : CUDA stream to use for copying, default the default stream
Returns
-------
``DeviceBuffer`` with copy of data from host
Examples
--------
>>> import rmm
>>> db = rmm._lib.device_buffer.to_device(b"abc")
>>> print(bytes(db))
b'abc'
"""
if b is None:
raise TypeError(
"Argument 'b' has incorrect type"
" (expected bytes-like, got NoneType)"
)
cdef uintptr_t p = <uintptr_t>&b[0]
cdef size_t s = len(b)
return DeviceBuffer(ptr=p, size=s, stream=stream)
@cython.boundscheck(False)
cdef void _copy_async(const void* src,
void* dst,
size_t count,
ccudart.cudaMemcpyKind kind,
cuda_stream_view stream) except * nogil:
"""
Asynchronously copy data between host and/or device pointers.
This is a convenience wrapper around cudaMemcpyAsync that
checks for errors. Only used for internal implementation.
Parameters
----------
src : pointer to ``bytes``-like host buffer or device data to copy from
dst : pointer to ``bytes``-like host buffer or device data to copy into
count : the size in bytes to copy
kind : the kind of copy to perform
stream : CUDA stream to use for copying, default the default stream
"""
cdef cudaError_t err = cudaMemcpyAsync(dst, src, count, kind,
<cudaStream_t>stream)
if err != cudaError.cudaSuccess:
raise RuntimeError(f"Memcpy failed with error: {err}")
@cython.boundscheck(False)
cpdef void copy_ptr_to_host(uintptr_t db,
unsigned char[::1] hb,
Stream stream=DEFAULT_STREAM) except *:
"""Copy from a device pointer to a buffer on host
Parameters
----------
db : pointer to data on device to copy
hb : ``bytes``-like buffer to write into
stream : CUDA stream to use for copying, default the default stream
Note
----
If ``stream`` is the default stream, it is synchronized after the copy.
However if a non-default ``stream`` is provided, this function is fully
asynchronous.
Examples
--------
>>> import rmm
>>> db = rmm.DeviceBuffer.to_device(b"abc")
>>> hb = bytearray(db.nbytes)
>>> rmm._lib.device_buffer.copy_ptr_to_host(db.ptr, hb)
>>> print(hb)
bytearray(b'abc')
"""
if hb is None:
raise TypeError(
"Argument `hb` has incorrect type"
" (expected bytes-like, got NoneType)"
)
with nogil:
_copy_async(<const void*>db, <void*>&hb[0], len(hb),
cudaMemcpyKind.cudaMemcpyDeviceToHost, stream.view())
if stream.c_is_default():
stream.c_synchronize()
@cython.boundscheck(False)
cpdef void copy_host_to_ptr(const unsigned char[::1] hb,
uintptr_t db,
Stream stream=DEFAULT_STREAM) except *:
"""Copy from a host pointer to a device pointer
Parameters
----------
hb : ``bytes``-like host buffer to copy
db : pointer to data on device to write into
stream : CUDA stream to use for copying, default the default stream
Note
----
If ``stream`` is the default stream, it is synchronized after the copy.
However if a non-default ``stream`` is provided, this function is fully
asynchronous.
Examples
--------
>>> import rmm
>>> db = rmm.DeviceBuffer(size=10)
>>> hb = b"abc"
>>> rmm._lib.device_buffer.copy_host_to_ptr(hb, db.ptr)
>>> hb = db.copy_to_host()
>>> print(hb)
array([97, 98, 99, 0, 0, 0, 0, 0, 0, 0], dtype=uint8)
"""
if hb is None:
raise TypeError(
"Argument `hb` has incorrect type"
" (expected bytes-like, got NoneType)"
)
with nogil:
_copy_async(<const void*>&hb[0], <void*>db, len(hb),
cudaMemcpyKind.cudaMemcpyHostToDevice, stream.view())
if stream.c_is_default():
stream.c_synchronize()
@cython.boundscheck(False)
cpdef void copy_device_to_ptr(uintptr_t d_src,
uintptr_t d_dst,
size_t count,
Stream stream=DEFAULT_STREAM) except *:
"""Copy from a device pointer to a device pointer
Parameters
----------
d_src : pointer to data on device to copy from
d_dst : pointer to data on device to write into
count : the size in bytes to copy
stream : CUDA stream to use for copying, default the default stream
Examples
--------
>>> import rmm
>>> db = rmm.DeviceBuffer(size=5)
>>> db2 = rmm.DeviceBuffer.to_device(b"abc")
>>> rmm._lib.device_buffer.copy_device_to_ptr(db2.ptr, db.ptr, db2.size)
>>> hb = db.copy_to_host()
>>> hb
array([97, 98, 99, 0, 0], dtype=uint8)
"""
with nogil:
_copy_async(<const void*>d_src, <void*>d_dst, count,
cudaMemcpyKind.cudaMemcpyDeviceToDevice, stream.view())
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/_lib/logger.pyx
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from libcpp cimport bool
cdef extern from "spdlog/common.h" namespace "spdlog::level" nogil:
cpdef enum logging_level "spdlog::level::level_enum":
"""
The debug logging level for RMM.
Debug logging prints messages to a log file. See
`Debug Logging <https://github.com/rapidsai/rmm#debug-logging>`_
for more information.
Valid levels, in decreasing order of verbosity, are TRACE, DEBUG,
INFO, WARN, ERR, CRITICAL, and OFF. Default is INFO.
Examples
--------
>>> import rmm
>>> rmm.logging_level.DEBUG
<logging_level.DEBUG: 1>
>>> rmm.logging_level.DEBUG.value
1
>>> rmm.logging_level.DEBUG.name
'DEBUG'
See Also
--------
set_logging_level : Set the debug logging level
get_logging_level : Get the current debug logging level
"""
TRACE "spdlog::level::trace"
DEBUG "spdlog::level::debug"
INFO "spdlog::level::info"
WARN "spdlog::level::warn"
ERR "spdlog::level::err"
CRITICAL "spdlog::level::critical"
OFF "spdlog::level::off"
cdef extern from "spdlog/spdlog.h" namespace "spdlog" nogil:
cdef cppclass spdlog_logger "spdlog::logger":
spdlog_logger() except +
void set_level(logging_level level)
logging_level level()
void flush() except +
void flush_on(logging_level level)
logging_level flush_level()
bool should_log(logging_level msg_level)
cdef extern from "rmm/logger.hpp" namespace "rmm" nogil:
cdef spdlog_logger& logger() except +
def _validate_level_type(level):
if not isinstance(level, logging_level):
raise TypeError("level must be an instance of the logging_level enum")
def should_log(level):
"""
Check if a message at the given level would be logged.
A message at the given level would be logged if the current debug logging
level is set to a level that is at least as verbose than the given level,
*and* the RMM module is compiled for a logging level at least as verbose.
If these conditions are not both met, this function will return false.
Debug logging prints messages to a log file. See
`Debug Logging <https://github.com/rapidsai/rmm#debug-logging>`_
for more information.
Parameters
----------
level : logging_level
The debug logging level. Valid values are instances of the
``logging_level`` enum.
Returns
-------
should_log : bool
True if a message at the given level would be logged, False otherwise.
Raises
------
TypeError
If the logging level is not an instance of the ``logging_level`` enum.
"""
_validate_level_type(level)
return logger().should_log(level)
def set_logging_level(level):
"""
Set the debug logging level.
Debug logging prints messages to a log file. See
`Debug Logging <https://github.com/rapidsai/rmm#debug-logging>`_
for more information.
Parameters
----------
level : logging_level
The debug logging level. Valid values are instances of the
``logging_level`` enum.
Raises
------
TypeError
If the logging level is not an instance of the ``logging_level`` enum.
See Also
--------
get_logging_level : Get the current debug logging level.
Examples
--------
>>> import rmm
>>> rmm.set_logging_level(rmm.logging_level.WARN) # set logging level to warn
"""
_validate_level_type(level)
logger().set_level(level)
if not should_log(level):
warnings.warn(f"RMM will not log logging_level.{level.name}. This "
"may be because the C++ library is compiled for a "
"less-verbose logging level.")
def get_logging_level():
"""
Get the current debug logging level.
Debug logging prints messages to a log file. See
`Debug Logging <https://github.com/rapidsai/rmm#debug-logging>`_
for more information.
Returns
-------
level : logging_level
The current debug logging level, an instance of the ``logging_level``
enum.
See Also
--------
set_logging_level : Set the debug logging level.
Examples
--------
>>> import rmm
>>> rmm.get_logging_level() # get current logging level
<logging_level.INFO: 2>
"""
return logging_level(logger().level())
def flush_logger():
"""
Flush the debug logger. This will cause any buffered log messages to
be written to the log file.
Debug logging prints messages to a log file. See
`Debug Logging <https://github.com/rapidsai/rmm#debug-logging>`_
for more information.
See Also
--------
set_flush_level : Set the flush level for the debug logger.
get_flush_level : Get the current debug logging flush level.
Examples
--------
>>> import rmm
>>> rmm.flush_logger() # flush the logger
"""
logger().flush()
def set_flush_level(level):
"""
Set the flush level for the debug logger. Messages of this level or higher
will automatically flush to the file.
Debug logging prints messages to a log file. See
`Debug Logging <https://github.com/rapidsai/rmm#debug-logging>`_
for more information.
Parameters
----------
level : logging_level
The debug logging level. Valid values are instances of the
``logging_level`` enum.
Raises
------
TypeError
If the logging level is not an instance of the ``logging_level`` enum.
See Also
--------
get_flush_level : Get the current debug logging flush level.
flush_logger : Flush the logger.
Examples
--------
>>> import rmm
>>> rmm.flush_on(rmm.logging_level.WARN) # set flush level to warn
"""
_validate_level_type(level)
logger().flush_on(level)
if not should_log(level):
warnings.warn(f"RMM will not log logging_level.{level.name}. This "
"may be because the C++ library is compiled for a "
"less-verbose logging level.")
def get_flush_level():
"""
Get the current debug logging flush level for the RMM logger. Messages of
this level or higher will automatically flush to the file.
Debug logging prints messages to a log file. See
`Debug Logging <https://github.com/rapidsai/rmm#debug-logging>`_
for more information.
Returns
-------
logging_level
The current flush level, an instance of the ``logging_level``
enum.
See Also
--------
set_flush_level : Set the flush level for the logger.
flush_logger : Flush the logger.
Examples
--------
>>> import rmm
>>> rmm.flush_level() # get current flush level
<logging_level.INFO: 2>
"""
return logging_level(logger().flush_level())
| 0 |
rapidsai_public_repos/rmm/python/rmm/_lib
|
rapidsai_public_repos/rmm/python/rmm/_lib/tests/test_device_buffer.pyx
|
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from libcpp.memory cimport make_unique
from rmm._lib.cuda_stream_view cimport cuda_stream_default
from rmm._lib.device_buffer cimport DeviceBuffer, device_buffer
def test_release():
expect = DeviceBuffer.to_device(b'abc')
cdef DeviceBuffer buf = DeviceBuffer.to_device(b'abc')
got = DeviceBuffer.c_from_unique_ptr(
make_unique[device_buffer](buf.c_release(),
cuda_stream_default.value())
)
np.testing.assert_equal(expect.copy_to_host(), got.copy_to_host())
def test_size_after_release():
cdef DeviceBuffer buf = DeviceBuffer.to_device(b'abc')
buf.c_release()
assert buf.size == 0
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/allocators/numba.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import inspect
from cuda.cuda import CUdeviceptr, cuIpcGetMemHandle
from numba import config, cuda
from numba.cuda import HostOnlyCUDAMemoryManager, IpcHandle, MemoryPointer
from rmm import _lib as librmm
def _make_emm_plugin_finalizer(handle, allocations):
"""
Factory to make the finalizer function.
We need to bind *handle* and *allocations* into the actual finalizer, which
takes no args.
"""
def finalizer():
"""
Invoked when the MemoryPointer is freed
"""
# At exit time (particularly in the Numba test suite) allocations may
# have already been cleaned up by a call to Context.reset() for the
# context, even if there are some DeviceNDArrays and their underlying
# allocations lying around. Finalizers then get called by weakref's
# atexit finalizer, at which point allocations[handle] no longer
# exists. This is harmless, except that a traceback is printed just
# prior to exit (without abnormally terminating the program), but is
# worrying for the user. To avoid the traceback, we check if
# allocations is already empty.
#
# In the case where allocations is not empty, but handle is not in
# allocations, then something has gone wrong - so we only guard against
# allocations being completely empty, rather than handle not being in
# allocations.
if allocations:
del allocations[handle]
return finalizer
class RMMNumbaManager(HostOnlyCUDAMemoryManager):
"""
External Memory Management Plugin implementation for Numba. Provides
on-device allocation only.
See https://numba.readthedocs.io/en/stable/cuda/external-memory.html for
details of the interface being implemented here.
"""
def initialize(self):
# No special initialization needed to use RMM within a given context.
pass
def memalloc(self, size):
"""
Allocate an on-device array from the RMM pool.
"""
buf = librmm.DeviceBuffer(size=size)
ctx = self.context
if config.CUDA_USE_NVIDIA_BINDING:
ptr = CUdeviceptr(int(buf.ptr))
else:
# expect ctypes bindings in numba
ptr = ctypes.c_uint64(int(buf.ptr))
finalizer = _make_emm_plugin_finalizer(int(buf.ptr), self.allocations)
# self.allocations is initialized by the parent, HostOnlyCUDAManager,
# and cleared upon context reset, so although we insert into it here
# and delete from it in the finalizer, we need not do any other
# housekeeping elsewhere.
self.allocations[int(buf.ptr)] = buf
return MemoryPointer(ctx, ptr, size, finalizer=finalizer)
def get_ipc_handle(self, memory):
"""
Get an IPC handle for the MemoryPointer memory with offset modified by
the RMM memory pool.
"""
start, end = cuda.cudadrv.driver.device_extents(memory)
if config.CUDA_USE_NVIDIA_BINDING:
_, ipc_handle = cuIpcGetMemHandle(start)
offset = int(memory.handle) - int(start)
else:
ipc_handle = (ctypes.c_byte * 64)() # IPC handle is 64 bytes
cuda.cudadrv.driver.driver.cuIpcGetMemHandle(
ctypes.byref(ipc_handle),
start,
)
offset = memory.handle.value - start
source_info = cuda.current_context().device.get_device_identity()
return IpcHandle(
memory, ipc_handle, memory.size, source_info, offset=offset
)
def get_memory_info(self):
"""Returns ``(free, total)`` memory in bytes in the context.
This implementation raises `NotImplementedError` because the allocation
will be performed using rmm's currently set default mr, which may be a
pool allocator.
"""
raise NotImplementedError()
@property
def interface_version(self):
return 1
# The parent class docstrings contain references without fully qualified names,
# so we need to replace them here for our Sphinx docs to render properly.
for _, method in inspect.getmembers(RMMNumbaManager, inspect.isfunction):
if method.__doc__ is not None:
method.__doc__ = method.__doc__.replace(
":class:`BaseCUDAMemoryManager`",
":class:`numba.cuda.BaseCUDAMemoryManager`",
)
# Enables the use of RMM for Numba via an environment variable setting,
# NUMBA_CUDA_MEMORY_MANAGER=rmm. See:
# https://numba.readthedocs.io/en/stable/cuda/external-memory.html#environment-variable
_numba_memory_manager = RMMNumbaManager
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/allocators/torch.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from torch.cuda.memory import CUDAPluggableAllocator
except ImportError:
rmm_torch_allocator = None
else:
import rmm._lib.torch_allocator
_alloc_free_lib_path = rmm._lib.torch_allocator.__file__
rmm_torch_allocator = CUDAPluggableAllocator(
_alloc_free_lib_path,
alloc_fn_name="allocate",
free_fn_name="deallocate",
)
| 0 |
rapidsai_public_repos/rmm/python/rmm
|
rapidsai_public_repos/rmm/python/rmm/allocators/cupy.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rmm import _lib as librmm
from rmm._cuda.stream import Stream
try:
import cupy
except ImportError:
cupy = None
def rmm_cupy_allocator(nbytes):
"""
A CuPy allocator that makes use of RMM.
Examples
--------
>>> from rmm.allocators.cupy import rmm_cupy_allocator
>>> import cupy
>>> cupy.cuda.set_allocator(rmm_cupy_allocator)
"""
if cupy is None:
raise ModuleNotFoundError("No module named 'cupy'")
stream = Stream(obj=cupy.cuda.get_current_stream())
buf = librmm.device_buffer.DeviceBuffer(size=nbytes, stream=stream)
dev_id = -1 if buf.ptr else cupy.cuda.device.get_device_id()
mem = cupy.cuda.UnownedMemory(
ptr=buf.ptr, size=buf.size, owner=buf, device_id=dev_id
)
ptr = cupy.cuda.memory.MemoryPointer(mem, 0)
return ptr
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/device_buffer_tests.cu
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/exec_policy.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <thrust/equal.h>
#include <thrust/sequence.h>
#include <gtest/gtest.h>
namespace testing {
namespace thrust = THRUST_NS_QUALIFIER;
} // namespace testing
using namespace testing;
#include <cuda_runtime_api.h>
#include <cstddef>
#include <random>
template <typename MemoryResourceType>
struct DeviceBufferTest : public ::testing::Test {
rmm::cuda_stream stream{};
std::size_t size{};
MemoryResourceType mr{};
DeviceBufferTest()
{
std::default_random_engine generator;
auto constexpr range_min{1000};
auto constexpr range_max{100000};
std::uniform_int_distribution<std::size_t> distribution(range_min, range_max);
size = distribution(generator);
}
};
using resources = ::testing::Types<rmm::mr::cuda_memory_resource, rmm::mr::managed_memory_resource>;
using async_resource_ref = cuda::mr::async_resource_ref<cuda::mr::device_accessible>;
TYPED_TEST_CASE(DeviceBufferTest, resources);
TYPED_TEST(DeviceBufferTest, EmptyBuffer)
{
rmm::device_buffer buff(0, rmm::cuda_stream_view{});
EXPECT_TRUE(buff.is_empty());
}
TYPED_TEST(DeviceBufferTest, DefaultMemoryResource)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_view{});
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.ssize());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(async_resource_ref{rmm::mr::get_current_device_resource()}, buff.memory_resource());
EXPECT_EQ(rmm::cuda_stream_view{}, buff.stream());
}
TYPED_TEST(DeviceBufferTest, DefaultMemoryResourceStream)
{
rmm::device_buffer buff(this->size, this->stream);
this->stream.synchronize();
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(async_resource_ref{rmm::mr::get_current_device_resource()}, buff.memory_resource());
EXPECT_EQ(this->stream, buff.stream());
}
TYPED_TEST(DeviceBufferTest, ExplicitMemoryResource)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_view{}, this->mr);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(async_resource_ref{this->mr}, buff.memory_resource());
EXPECT_EQ(rmm::cuda_stream_view{}, buff.stream());
}
TYPED_TEST(DeviceBufferTest, ExplicitMemoryResourceStream)
{
rmm::device_buffer buff(this->size, this->stream, this->mr);
this->stream.synchronize();
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(async_resource_ref{this->mr}, buff.memory_resource());
EXPECT_EQ(this->stream, buff.stream());
}
TYPED_TEST(DeviceBufferTest, CopyFromRawDevicePointer)
{
void* device_memory{nullptr};
EXPECT_EQ(cudaSuccess, cudaMalloc(&device_memory, this->size));
rmm::device_buffer buff(device_memory, this->size, rmm::cuda_stream_view{});
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(async_resource_ref{rmm::mr::get_current_device_resource()}, buff.memory_resource());
EXPECT_EQ(rmm::cuda_stream_view{}, buff.stream());
// TODO check for equality between the contents of the two allocations
buff.stream().synchronize();
EXPECT_EQ(cudaSuccess, cudaFree(device_memory));
}
TYPED_TEST(DeviceBufferTest, CopyFromRawHostPointer)
{
std::vector<uint8_t> host_data(this->size);
rmm::device_buffer buff(
static_cast<void*>(host_data.data()), this->size, rmm::cuda_stream_view{});
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(async_resource_ref{rmm::mr::get_current_device_resource()}, buff.memory_resource());
EXPECT_EQ(rmm::cuda_stream_view{}, buff.stream());
buff.stream().synchronize();
// TODO check for equality between the contents of the two allocations
}
TYPED_TEST(DeviceBufferTest, CopyFromNullptr)
{
// can copy from a nullptr only if size == 0
rmm::device_buffer buff(nullptr, 0, rmm::cuda_stream_view{});
EXPECT_EQ(nullptr, buff.data());
EXPECT_EQ(0, buff.size());
EXPECT_EQ(0, buff.capacity());
EXPECT_EQ(async_resource_ref{rmm::mr::get_current_device_resource()}, buff.memory_resource());
EXPECT_EQ(rmm::cuda_stream_view{}, buff.stream());
}
TYPED_TEST(DeviceBufferTest, CopyFromNullptrNonZero)
{
// can copy from a nullptr only if size == 0
EXPECT_THROW(rmm::device_buffer buff(nullptr, 1, rmm::cuda_stream_view{}), rmm::logic_error);
}
TYPED_TEST(DeviceBufferTest, CopyConstructor)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_view{}, &this->mr);
// Initialize buffer
thrust::sequence(rmm::exec_policy(rmm::cuda_stream_default),
static_cast<char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<char*>(buff.data()) + buff.size(),
0);
rmm::device_buffer buff_copy(buff, rmm::cuda_stream_default); // uses default MR
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
EXPECT_EQ(buff.capacity(), buff_copy.capacity());
EXPECT_EQ(buff_copy.memory_resource(),
async_resource_ref{rmm::mr::get_current_device_resource()});
EXPECT_EQ(buff_copy.stream(), rmm::cuda_stream_view{});
EXPECT_TRUE(thrust::equal(rmm::exec_policy(rmm::cuda_stream_default),
static_cast<char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<char*>(buff.data()) + buff.size(),
static_cast<char*>(buff_copy.data())));
// now use buff's stream and MR
rmm::device_buffer buff_copy2(buff, buff.stream(), buff.memory_resource());
EXPECT_EQ(buff_copy2.memory_resource(), buff.memory_resource());
EXPECT_EQ(buff_copy2.memory_resource(), buff.memory_resource());
EXPECT_EQ(buff_copy2.stream(), buff.stream());
EXPECT_TRUE(thrust::equal(rmm::exec_policy(rmm::cuda_stream_default),
static_cast<signed char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<signed char*>(buff.data()) + buff.size(),
static_cast<signed char*>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyCapacityLargerThanSize)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_default, &this->mr);
// Resizing smaller to make `size()` < `capacity()`
auto new_size = this->size - 1;
buff.resize(new_size, rmm::cuda_stream_default);
thrust::sequence(rmm::exec_policy(rmm::cuda_stream_default),
static_cast<signed char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<signed char*>(buff.data()) + buff.size(),
0);
rmm::device_buffer buff_copy(buff, rmm::cuda_stream_default);
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
// The capacity of the copy should be equal to the `size()` of the original
EXPECT_EQ(new_size, buff_copy.capacity());
EXPECT_EQ(buff_copy.memory_resource(),
async_resource_ref{rmm::mr::get_current_device_resource()});
EXPECT_EQ(buff_copy.stream(), rmm::cuda_stream_view{});
EXPECT_TRUE(thrust::equal(rmm::exec_policy(rmm::cuda_stream_default),
static_cast<signed char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<signed char*>(buff.data()) + buff.size(),
static_cast<signed char*>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyConstructorExplicitMr)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_default, &this->mr);
thrust::sequence(rmm::exec_policy(rmm::cuda_stream_default),
static_cast<signed char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<signed char*>(buff.data()) + buff.size(),
0);
rmm::device_buffer buff_copy(buff, this->stream, &this->mr);
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
EXPECT_EQ(buff.capacity(), buff_copy.capacity());
EXPECT_EQ(buff.memory_resource(), buff_copy.memory_resource());
EXPECT_NE(buff.stream(), buff_copy.stream());
EXPECT_TRUE(thrust::equal(rmm::exec_policy(buff_copy.stream()),
static_cast<signed char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<signed char*>(buff.data()) + buff.size(),
static_cast<signed char*>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyCapacityLargerThanSizeExplicitMr)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_default, &this->mr);
// Resizing smaller to make `size()` < `capacity()`
auto new_size = this->size - 1;
buff.resize(new_size, rmm::cuda_stream_default);
thrust::sequence(rmm::exec_policy(rmm::cuda_stream_default),
static_cast<signed char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<signed char*>(buff.data()) + buff.size(),
0);
rmm::device_buffer buff_copy(buff, this->stream, &this->mr);
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
// The capacity of the copy should be equal to the `size()` of the original
EXPECT_EQ(new_size, buff_copy.capacity());
EXPECT_NE(buff.capacity(), buff_copy.capacity());
EXPECT_EQ(buff.memory_resource(), buff_copy.memory_resource());
EXPECT_NE(buff.stream(), buff_copy.stream());
EXPECT_TRUE(thrust::equal(rmm::exec_policy(buff_copy.stream()),
static_cast<signed char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<signed char*>(buff.data()) + buff.size(),
static_cast<signed char*>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, MoveConstructor)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_default, &this->mr);
auto* ptr = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
// New buffer should have the same contents as the original
rmm::device_buffer buff_new(std::move(buff));
EXPECT_NE(nullptr, buff_new.data());
EXPECT_EQ(ptr, buff_new.data());
EXPECT_EQ(size, buff_new.size());
EXPECT_EQ(capacity, buff_new.capacity());
EXPECT_EQ(stream, buff_new.stream());
EXPECT_EQ(mr, buff_new.memory_resource());
// Original buffer should be empty
EXPECT_EQ(nullptr,
buff.data()); // NOLINT(bugprone-use-after-move, clang-analyzer-cplusplus.Move)
EXPECT_EQ(0, buff.size()); // NOLINT(bugprone-use-after-move)
EXPECT_EQ(0, buff.capacity()); // NOLINT(bugprone-use-after-move)
EXPECT_EQ(rmm::cuda_stream_default, buff.stream()); // NOLINT(bugprone-use-after-move)
}
TYPED_TEST(DeviceBufferTest, MoveConstructorStream)
{
rmm::device_buffer buff(this->size, this->stream, &this->mr);
this->stream.synchronize();
auto* ptr = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
// New buffer should have the same contents as the original
rmm::device_buffer buff_new(std::move(buff));
this->stream.synchronize();
EXPECT_NE(nullptr, buff_new.data());
EXPECT_EQ(ptr, buff_new.data());
EXPECT_EQ(size, buff_new.size());
EXPECT_EQ(capacity, buff_new.capacity());
EXPECT_EQ(stream, buff_new.stream());
EXPECT_EQ(mr, buff_new.memory_resource());
// Original buffer should be empty
EXPECT_EQ(nullptr,
buff.data()); // NOLINT(bugprone-use-after-move, clang-analyzer-cplusplus.Move)
EXPECT_EQ(0, buff.size()); // NOLINT(bugprone-use-after-move)
EXPECT_EQ(0, buff.capacity()); // NOLINT(bugprone-use-after-move)
EXPECT_EQ(rmm::cuda_stream_view{}, buff.stream()); // NOLINT(bugprone-use-after-move)
}
TYPED_TEST(DeviceBufferTest, MoveAssignmentToDefault)
{
rmm::device_buffer src(this->size, rmm::cuda_stream_default, &this->mr);
auto* ptr = src.data();
auto size = src.size();
auto capacity = src.capacity();
auto mr = src.memory_resource();
auto stream = src.stream();
rmm::device_buffer dest;
dest = std::move(src);
// contents of `from` should be in `to`
EXPECT_NE(nullptr, dest.data());
EXPECT_EQ(ptr, dest.data());
EXPECT_EQ(size, dest.size());
EXPECT_EQ(capacity, dest.capacity());
EXPECT_EQ(stream, dest.stream());
EXPECT_EQ(mr, dest.memory_resource());
// `from` should be empty
EXPECT_EQ(nullptr, src.data()); // NOLINT(bugprone-use-after-move,clang-analyzer-cplusplus.Move)
EXPECT_EQ(0, src.size());
EXPECT_EQ(0, src.capacity());
EXPECT_EQ(rmm::cuda_stream_default, src.stream());
}
TYPED_TEST(DeviceBufferTest, MoveAssignment)
{
rmm::device_buffer src(this->size, rmm::cuda_stream_default, &this->mr);
auto* ptr = src.data();
auto size = src.size();
auto capacity = src.capacity();
auto mr = src.memory_resource();
auto stream = src.stream();
rmm::device_buffer dest(this->size - 1, rmm::cuda_stream_default, &this->mr);
dest = std::move(src);
// contents of `from` should be in `to`
EXPECT_NE(nullptr, dest.data());
EXPECT_EQ(ptr, dest.data());
EXPECT_EQ(size, dest.size());
EXPECT_EQ(capacity, dest.capacity());
EXPECT_EQ(stream, dest.stream());
EXPECT_EQ(mr, dest.memory_resource());
// `from` should be empty
EXPECT_EQ(nullptr, src.data()); // NOLINT(bugprone-use-after-move,clang-analyzer-cplusplus.Move)
EXPECT_EQ(0, src.size());
EXPECT_EQ(0, src.capacity());
EXPECT_EQ(rmm::cuda_stream_default, src.stream());
}
TYPED_TEST(DeviceBufferTest, SelfMoveAssignment)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_default, &this->mr);
auto* ptr = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
buff = std::move(buff); // self-move-assignment shouldn't modify the buffer
EXPECT_NE(nullptr, buff.data()); // NOLINT(bugprone-use-after-move,clang-analyzer-cplusplus.Move)
EXPECT_EQ(ptr, buff.data());
EXPECT_EQ(size, buff.size());
EXPECT_EQ(capacity, buff.capacity());
EXPECT_EQ(stream, buff.stream());
EXPECT_EQ(mr, buff.memory_resource());
}
TYPED_TEST(DeviceBufferTest, ResizeSmaller)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_default, &this->mr);
thrust::sequence(rmm::exec_policy(rmm::cuda_stream_default),
static_cast<signed char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<signed char*>(buff.data()) + buff.size(),
0);
auto* old_data = buff.data();
rmm::device_buffer old_content(
old_data, buff.size(), rmm::cuda_stream_default, &this->mr); // for comparison
auto new_size = this->size - 1;
buff.resize(new_size, rmm::cuda_stream_default);
EXPECT_EQ(new_size, buff.size());
EXPECT_EQ(this->size, buff.capacity()); // Capacity should be unchanged
// Resizing smaller means the existing allocation should remain unchanged
EXPECT_EQ(old_data, buff.data());
buff.shrink_to_fit(rmm::cuda_stream_default);
EXPECT_NE(nullptr, buff.data());
// A reallocation should have occurred
EXPECT_NE(old_data, buff.data());
EXPECT_EQ(new_size, buff.size());
EXPECT_EQ(buff.capacity(), buff.size());
EXPECT_TRUE(thrust::equal(rmm::exec_policy(rmm::cuda_stream_default),
static_cast<signed char*>(buff.data()),
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
static_cast<signed char*>(buff.data()) + buff.size(),
static_cast<signed char*>(old_content.data())));
}
TYPED_TEST(DeviceBufferTest, ResizeBigger)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_default, &this->mr);
auto* old_data = buff.data();
auto new_size = this->size + 1;
buff.resize(new_size, rmm::cuda_stream_default);
EXPECT_EQ(new_size, buff.size());
EXPECT_EQ(new_size, buff.capacity());
// Resizing bigger means the data should point to a new allocation
EXPECT_NE(old_data, buff.data());
}
TYPED_TEST(DeviceBufferTest, ReserveSmaller)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_default, &this->mr);
auto* const old_data = buff.data();
auto const old_capacity = buff.capacity();
auto const new_capacity = buff.capacity() - 1;
buff.reserve(new_capacity, rmm::cuda_stream_default);
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(old_capacity, buff.capacity());
// Reserving smaller means the allocation is unchanged
EXPECT_EQ(old_data, buff.data());
}
TYPED_TEST(DeviceBufferTest, ReserveBigger)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_default, &this->mr);
auto* const old_data = buff.data();
auto const new_capacity = buff.capacity() + 1;
buff.reserve(new_capacity, rmm::cuda_stream_default);
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(new_capacity, buff.capacity());
// Reserving bigger means the data should point to a new allocation
EXPECT_NE(old_data, buff.data());
}
TYPED_TEST(DeviceBufferTest, SetGetStream)
{
rmm::device_buffer buff(this->size, rmm::cuda_stream_default, &this->mr);
EXPECT_EQ(buff.stream(), rmm::cuda_stream_default);
rmm::cuda_stream_view const otherstream{cudaStreamPerThread};
buff.set_stream(otherstream);
EXPECT_EQ(buff.stream(), otherstream);
}
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/byte_literals.hpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
namespace rmm::test {
constexpr auto kilo{long{1} << 10};
constexpr auto mega{long{1} << 20};
constexpr auto giga{long{1} << 30};
constexpr auto tera{long{1} << 40};
constexpr auto peta{long{1} << 50};
// user-defined Byte literals
constexpr unsigned long long operator""_B(unsigned long long val) { return val; }
constexpr unsigned long long operator""_KiB(unsigned long long const val) { return kilo * val; }
constexpr unsigned long long operator""_MiB(unsigned long long const val) { return mega * val; }
constexpr unsigned long long operator""_GiB(unsigned long long const val) { return giga * val; }
constexpr unsigned long long operator""_TiB(unsigned long long const val) { return tera * val; }
constexpr unsigned long long operator""_PiB(unsigned long long const val) { return peta * val; }
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/device_scalar_tests.cpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <rmm/cuda_stream.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <cuda_runtime_api.h>
#include <chrono>
#include <cstddef>
#include <random>
#include <type_traits>
// explicit instantiation for test coverage purposes
template class rmm::device_scalar<int>;
template <typename T>
struct DeviceScalarTest : public ::testing::Test {
std::default_random_engine generator{};
T value{};
rmm::cuda_stream stream{};
rmm::mr::device_memory_resource* mr{rmm::mr::get_current_device_resource()};
DeviceScalarTest() : value{random_value()} {}
template <typename U = T, std::enable_if_t<std::is_same<U, bool>::value, bool> = true>
U random_value()
{
static std::bernoulli_distribution distribution{};
return distribution(generator);
}
template <
typename U = T,
std::enable_if_t<(std::is_integral<U>::value && not std::is_same<U, bool>::value), bool> = true>
U random_value()
{
static std::uniform_int_distribution<U> distribution{std::numeric_limits<T>::lowest(),
std::numeric_limits<T>::max()};
return distribution(generator);
}
template <typename U = T, std::enable_if_t<std::is_floating_point<U>::value, bool> = true>
U random_value()
{
auto const mean{100};
auto const stddev{20};
static std::normal_distribution<U> distribution(mean, stddev);
return distribution(generator);
}
};
using Types = ::testing::Types<bool, int8_t, int16_t, int32_t, int64_t, float, double>;
TYPED_TEST_CASE(DeviceScalarTest, Types);
TYPED_TEST(DeviceScalarTest, Uninitialized)
{
rmm::device_scalar<TypeParam> scalar{this->stream, this->mr};
EXPECT_NE(nullptr, scalar.data());
}
TYPED_TEST(DeviceScalarTest, InitialValue)
{
rmm::device_scalar<TypeParam> scalar{this->value, this->stream, this->mr};
EXPECT_NE(nullptr, scalar.data());
EXPECT_EQ(this->value, scalar.value(this->stream));
}
// test const version of data()
TYPED_TEST(DeviceScalarTest, ConstPtrData)
{
rmm::device_scalar<TypeParam> const scalar{this->value, this->stream, this->mr};
auto const* data = scalar.data();
EXPECT_NE(nullptr, data);
}
TYPED_TEST(DeviceScalarTest, CopyCtor)
{
rmm::device_scalar<TypeParam> scalar{this->value, this->stream, this->mr};
EXPECT_NE(nullptr, scalar.data());
EXPECT_EQ(this->value, scalar.value(this->stream));
rmm::device_scalar<TypeParam> copy{scalar, this->stream, this->mr};
EXPECT_NE(nullptr, copy.data());
EXPECT_NE(copy.data(), scalar.data());
EXPECT_EQ(copy.value(this->stream), scalar.value(this->stream));
}
TYPED_TEST(DeviceScalarTest, MoveCtor)
{
rmm::device_scalar<TypeParam> scalar{this->value, this->stream, this->mr};
EXPECT_NE(nullptr, scalar.data());
EXPECT_EQ(this->value, scalar.value(this->stream));
auto* original_pointer = scalar.data();
auto original_value = scalar.value(this->stream);
rmm::device_scalar<TypeParam> moved_to{std::move(scalar)};
EXPECT_NE(nullptr, moved_to.data());
EXPECT_EQ(moved_to.data(), original_pointer);
EXPECT_EQ(moved_to.value(this->stream), original_value);
// NOLINTNEXTLINE(bugprone-use-after-move,clang-analyzer-cplusplus.Move)
EXPECT_EQ(nullptr, scalar.data());
}
TYPED_TEST(DeviceScalarTest, SetValue)
{
rmm::device_scalar<TypeParam> scalar{this->value, this->stream, this->mr};
EXPECT_NE(nullptr, scalar.data());
auto expected = this->random_value();
scalar.set_value_async(expected, this->stream);
EXPECT_EQ(expected, scalar.value(this->stream));
}
TYPED_TEST(DeviceScalarTest, SetValueToZero)
{
rmm::device_scalar<TypeParam> scalar{this->value, this->stream, this->mr};
EXPECT_NE(nullptr, scalar.data());
scalar.set_value_to_zero_async(this->stream);
EXPECT_EQ(TypeParam{0}, scalar.value(this->stream));
}
TYPED_TEST(DeviceScalarTest, SetGetStream)
{
rmm::device_scalar<TypeParam> scalar(this->value, this->stream, this->mr);
EXPECT_EQ(scalar.stream(), this->stream);
rmm::cuda_stream_view const otherstream{cudaStreamPerThread};
scalar.set_stream(otherstream);
EXPECT_EQ(scalar.stream(), otherstream);
}
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/container_multidevice_tests.cu
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "device_check_resource_adaptor.hpp"
#include "rmm/mr/device/per_device_resource.hpp"
#include <rmm/cuda_stream.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <gtest/gtest.h>
#include <type_traits>
template <typename ContainerType>
struct ContainerMultiDeviceTest : public ::testing::Test {};
using containers =
::testing::Types<rmm::device_buffer, rmm::device_uvector<int>, rmm::device_scalar<int>>;
TYPED_TEST_CASE(ContainerMultiDeviceTest, containers);
TYPED_TEST(ContainerMultiDeviceTest, CreateDestroyDifferentActiveDevice)
{
// Get the number of cuda devices
int num_devices = rmm::get_num_cuda_devices();
// only run on multidevice systems
if (num_devices >= 2) {
rmm::cuda_set_device_raii dev{rmm::cuda_device_id{0}};
auto* orig_mr = rmm::mr::get_current_device_resource();
auto check_mr = device_check_resource_adaptor{orig_mr};
rmm::mr::set_current_device_resource(&check_mr);
{
if constexpr (std::is_same_v<TypeParam, rmm::device_scalar<int>>) {
auto buf = TypeParam(rmm::cuda_stream_view{});
RMM_ASSERT_CUDA_SUCCESS(cudaSetDevice(1)); // force dtor with different active device
} else {
auto buf = TypeParam(128, rmm::cuda_stream_view{});
RMM_ASSERT_CUDA_SUCCESS(cudaSetDevice(1)); // force dtor with different active device
}
}
RMM_ASSERT_CUDA_SUCCESS(cudaSetDevice(0));
rmm::mr::set_current_device_resource(orig_mr);
}
}
TYPED_TEST(ContainerMultiDeviceTest, CreateMoveDestroyDifferentActiveDevice)
{
// Get the number of cuda devices
int num_devices = rmm::get_num_cuda_devices();
// only run on multidevice systems
if (num_devices >= 2) {
rmm::cuda_set_device_raii dev{rmm::cuda_device_id{0}};
auto* orig_mr = rmm::mr::get_current_device_resource();
auto check_mr = device_check_resource_adaptor{orig_mr};
rmm::mr::set_current_device_resource(&check_mr);
{
auto buf_1 = []() {
if constexpr (std::is_same_v<TypeParam, rmm::device_scalar<int>>) {
return TypeParam(rmm::cuda_stream_view{});
} else {
return TypeParam(128, rmm::cuda_stream_view{});
}
}();
{
if constexpr (std::is_same_v<TypeParam, rmm::device_scalar<int>>) {
// device_vector does not have a constructor that takes a stream
auto buf_0 = TypeParam(rmm::cuda_stream_view{});
buf_1 = std::move(buf_0);
} else {
auto buf_0 = TypeParam(128, rmm::cuda_stream_view{});
buf_1 = std::move(buf_0);
}
}
RMM_ASSERT_CUDA_SUCCESS(cudaSetDevice(1)); // force dtor with different active device
}
RMM_ASSERT_CUDA_SUCCESS(cudaSetDevice(0));
rmm::mr::set_current_device_resource(orig_mr);
}
}
TYPED_TEST(ContainerMultiDeviceTest, ResizeDifferentActiveDevice)
{
// Get the number of cuda devices
int num_devices = rmm::get_num_cuda_devices();
// only run on multidevice systems
if (num_devices >= 2) {
rmm::cuda_set_device_raii dev{rmm::cuda_device_id{0}};
auto* orig_mr = rmm::mr::get_current_device_resource();
auto check_mr = device_check_resource_adaptor{orig_mr};
rmm::mr::set_current_device_resource(&check_mr);
if constexpr (not std::is_same_v<TypeParam, rmm::device_scalar<int>>) {
auto buf = TypeParam(128, rmm::cuda_stream_view{});
RMM_ASSERT_CUDA_SUCCESS(cudaSetDevice(1)); // force resize with different active device
buf.resize(1024, rmm::cuda_stream_view{});
}
RMM_ASSERT_CUDA_SUCCESS(cudaSetDevice(0));
rmm::mr::set_current_device_resource(orig_mr);
}
}
TYPED_TEST(ContainerMultiDeviceTest, ShrinkDifferentActiveDevice)
{
// Get the number of cuda devices
int num_devices = rmm::get_num_cuda_devices();
// only run on multidevice systems
if (num_devices >= 2) {
rmm::cuda_set_device_raii dev{rmm::cuda_device_id{0}};
auto* orig_mr = rmm::mr::get_current_device_resource();
auto check_mr = device_check_resource_adaptor{orig_mr};
rmm::mr::set_current_device_resource(&check_mr);
if constexpr (not std::is_same_v<TypeParam, rmm::device_scalar<int>>) {
auto buf = TypeParam(128, rmm::cuda_stream_view{});
RMM_ASSERT_CUDA_SUCCESS(cudaSetDevice(1)); // force resize with different active device
buf.resize(64, rmm::cuda_stream_view{});
buf.shrink_to_fit(rmm::cuda_stream_view{});
}
RMM_ASSERT_CUDA_SUCCESS(cudaSetDevice(0));
rmm::mr::set_current_device_resource(orig_mr);
}
}
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Build options
option(DISABLE_DEPRECATION_WARNING "Disable warnings generated from deprecated declarations." OFF)
option(CODE_COVERAGE "Enable generating code coverage with gcov." OFF)
include(rapids-test)
rapids_test_init()
# This function takes in a test name and test source and handles setting all of the associated
# properties and linking to build the test
function(ConfigureTestInternal TEST_NAME)
add_executable(${TEST_NAME} ${ARGN})
target_include_directories(${TEST_NAME} PRIVATE "$<BUILD_INTERFACE:${RMM_SOURCE_DIR}>")
target_link_libraries(${TEST_NAME} GTest::gmock GTest::gtest GTest::gmock_main GTest::gtest_main
pthread rmm)
set_target_properties(
${TEST_NAME}
PROPERTIES POSITION_INDEPENDENT_CODE ON
RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${RMM_BINARY_DIR}/gtests>"
CUDA_ARCHITECTURES "${CMAKE_CUDA_ARCHITECTURES}"
INSTALL_RPATH "\$ORIGIN/../../../lib"
CXX_STANDARD 17
CXX_STANDARD_REQUIRED ON
CUDA_STANDARD 17
CUDA_STANDARD_REQUIRED ON)
target_compile_definitions(${TEST_NAME}
PUBLIC "SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_${RMM_LOGGING_LEVEL}")
target_compile_options(${TEST_NAME} PUBLIC $<$<COMPILE_LANG_AND_ID:CXX,GNU,Clang>:-Wall -Werror
-Wno-error=deprecated-declarations>)
if(DISABLE_DEPRECATION_WARNING)
target_compile_options(
${TEST_NAME} PUBLIC $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=-Wno-deprecated-declarations>)
target_compile_options(${TEST_NAME}
PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-Wno-deprecated-declarations>)
endif()
if(CODE_COVERAGE)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(KEEP_DIR ${CMAKE_CURRENT_BINARY_DIR}/tmp)
make_directory(${KEEP_DIR})
target_compile_options(${TEST_NAME} PUBLIC $<$<COMPILE_LANGUAGE:CUDA>:--keep
--keep-dir=${KEEP_DIR}>)
target_compile_options(
${TEST_NAME}
PUBLIC
$<$<COMPILE_LANGUAGE:CUDA>:-O0
-Xcompiler=--coverage,-fprofile-abs-path,-fkeep-inline-functions,-fno-elide-constructors>)
target_compile_options(
${TEST_NAME} PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-O0 --coverage -fprofile-abs-path
-fkeep-inline-functions -fno-elide-constructors>)
target_link_options(${TEST_NAME} PRIVATE --coverage)
target_link_libraries(${TEST_NAME} gcov)
endif()
# Add coverage-generated files to clean target
list(APPEND COVERAGE_CLEAN_FILES "**/*.gcno" "**/*.gcda")
set_property(
TARGET ${TEST_NAME}
APPEND
PROPERTY ADDITIONAL_CLEAN_FILES ${COVERAGE_CLEAN_FILES})
endif()
endfunction()
# Wrapper around `ConfigureTestInternal` that builds tests both with and without per thread default
# stream
function(ConfigureTest TEST_NAME)
set(options)
set(one_value GPUS PERCENT)
set(multi_value)
cmake_parse_arguments(_RMM_TEST "${options}" "${one_value}" "${multi_value}" ${ARGN})
if(NOT DEFINED _RMM_TEST_GPUS AND NOT DEFINED _RMM_TEST_PERCENT)
set(_RMM_TEST_GPUS 1)
set(_RMM_TEST_PERCENT 5)
endif()
if(NOT DEFINED _RMM_TEST_GPUS)
set(_RMM_TEST_GPUS 1)
endif()
if(NOT DEFINED _RMM_TEST_PERCENT)
set(_RMM_TEST_PERCENT 100)
endif()
# Test with legacy default stream.
ConfigureTestInternal(${TEST_NAME} ${_RMM_TEST_UNPARSED_ARGUMENTS})
# Test with per-thread default stream.
string(REGEX REPLACE "_TEST$" "_PTDS_TEST" PTDS_TEST_NAME "${TEST_NAME}")
ConfigureTestInternal("${PTDS_TEST_NAME}" ${_RMM_TEST_UNPARSED_ARGUMENTS})
target_compile_definitions("${PTDS_TEST_NAME}" PUBLIC CUDA_API_PER_THREAD_DEFAULT_STREAM)
# Test with custom thrust namespace
string(REGEX REPLACE "_TEST$" "_NAMESPACE_TEST" NS_TEST_NAME "${TEST_NAME}")
ConfigureTestInternal("${NS_TEST_NAME}" ${_RMM_TEST_UNPARSED_ARGUMENTS})
target_compile_definitions("${NS_TEST_NAME}" PUBLIC THRUST_WRAPPED_NAMESPACE=rmm_thrust)
foreach(name ${TEST_NAME} ${PTDS_TEST_NAME} ${NS_TEST_NAME})
rapids_test_add(
NAME ${name}
COMMAND ${TEST_NAME}
GPUS ${_RMM_TEST_GPUS}
PERCENT ${_RMM_TEST_PERCENT}
INSTALL_COMPONENT_SET testing)
endforeach()
endfunction()
# test sources
# device mr tests
ConfigureTest(DEVICE_MR_TEST mr/device/mr_tests.cpp mr/device/mr_multithreaded_tests.cpp GPUS 1
PERCENT 90)
# device mr_ref tests
ConfigureTest(DEVICE_MR_REF_TEST mr/device/mr_ref_tests.cpp
mr/device/mr_ref_multithreaded_tests.cpp GPUS 1 PERCENT 100)
# general adaptor tests
ConfigureTest(ADAPTOR_TEST mr/device/adaptor_tests.cpp)
# pool mr tests
ConfigureTest(POOL_MR_TEST mr/device/pool_mr_tests.cpp GPUS 1 PERCENT 60)
# cuda_async mr tests
ConfigureTest(CUDA_ASYNC_MR_TEST mr/device/cuda_async_mr_tests.cpp GPUS 1 PERCENT 60)
# thrust allocator tests
ConfigureTest(THRUST_ALLOCATOR_TEST mr/device/thrust_allocator_tests.cu GPUS 1 PERCENT 60)
# polymorphic allocator tests
ConfigureTest(POLYMORPHIC_ALLOCATOR_TEST mr/device/polymorphic_allocator_tests.cpp)
# stream allocator adaptor tests
ConfigureTest(STREAM_ADAPTOR_TEST mr/device/stream_allocator_adaptor_tests.cpp)
# statistics adaptor tests
ConfigureTest(STATISTICS_TEST mr/device/statistics_mr_tests.cpp)
# tracking adaptor tests
ConfigureTest(TRACKING_TEST mr/device/tracking_mr_tests.cpp)
# out-of-memory callback adaptor tests
ConfigureTest(FAILURE_CALLBACK_TEST mr/device/failure_callback_mr_tests.cpp)
# aligned adaptor tests
ConfigureTest(ALIGNED_TEST mr/device/aligned_mr_tests.cpp)
# limiting adaptor tests
ConfigureTest(LIMITING_TEST mr/device/limiting_mr_tests.cpp)
# host mr tests
ConfigureTest(HOST_MR_TEST mr/host/mr_tests.cpp)
# host mr_ref tests
ConfigureTest(HOST_MR_REF_TEST mr/host/mr_ref_tests.cpp)
# pinned pool mr tests
ConfigureTest(PINNED_POOL_MR_TEST mr/host/pinned_pool_mr_tests.cpp)
# cuda stream tests
ConfigureTest(CUDA_STREAM_TEST cuda_stream_tests.cpp cuda_stream_pool_tests.cpp)
# device buffer tests
ConfigureTest(DEVICE_BUFFER_TEST device_buffer_tests.cu)
# device scalar tests
ConfigureTest(DEVICE_SCALAR_TEST device_scalar_tests.cpp)
# logger tests
ConfigureTest(LOGGER_TEST logger_tests.cpp)
# uvector tests
ConfigureTest(DEVICE_UVECTOR_TEST device_uvector_tests.cpp GPUS 1 PERCENT 60)
# arena MR tests
ConfigureTest(ARENA_MR_TEST mr/device/arena_mr_tests.cpp GPUS 1 PERCENT 60)
# binning MR tests
ConfigureTest(BINNING_MR_TEST mr/device/binning_mr_tests.cpp)
# callback memory resource tests
ConfigureTest(CALLBACK_MR_TEST mr/device/callback_mr_tests.cpp)
# container multidevice tests
ConfigureTest(CONTAINER_MULTIDEVICE_TEST container_multidevice_tests.cu)
rapids_test_install_relocatable(INSTALL_COMPONENT_SET testing DESTINATION bin/gtests/librmm)
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/cuda_stream_tests.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_buffer.hpp>
#include <sstream>
#include <cuda_runtime_api.h>
#include <gtest/gtest-death-test.h>
#include <gtest/gtest.h>
struct CudaStreamTest : public ::testing::Test {};
TEST_F(CudaStreamTest, Equality)
{
rmm::cuda_stream stream_a;
auto const view_a = stream_a.view();
auto const view_default = rmm::cuda_stream_view{};
EXPECT_EQ(stream_a, view_a);
EXPECT_NE(stream_a, view_default);
EXPECT_EQ(view_default, rmm::cuda_stream_view{});
EXPECT_EQ(view_default, rmm::cuda_stream_default);
EXPECT_NE(view_a, rmm::cuda_stream());
EXPECT_NE(stream_a, rmm::cuda_stream());
rmm::device_buffer buff{};
EXPECT_EQ(buff.stream(), view_default);
EXPECT_NE(static_cast<cudaStream_t>(stream_a), rmm::cuda_stream_default.value());
}
TEST_F(CudaStreamTest, MoveConstructor)
{
rmm::cuda_stream stream_a;
auto const view_a = stream_a.view();
rmm::cuda_stream stream_b = std::move(stream_a);
// NOLINTNEXTLINE(bugprone-use-after-move, clang-analyzer-cplusplus.Move)
EXPECT_FALSE(stream_a.is_valid()); // Any other operations on stream_a are UB, may segfault
EXPECT_EQ(stream_b, view_a);
}
TEST_F(CudaStreamTest, TestStreamViewOstream)
{
rmm::cuda_stream stream_a;
rmm::cuda_stream_view view(stream_a);
std::ostringstream oss;
oss << view;
std::ostringstream oss_expected;
oss_expected << stream_a.value();
EXPECT_EQ(oss.str(), oss_expected.str());
}
// Without this we don't get test coverage of ~stream_view, presumably because it is elided
TEST_F(CudaStreamTest, TestStreamViewDestructor)
{
auto view = std::make_shared<rmm::cuda_stream_view>(rmm::cuda_stream_per_thread);
view->synchronize();
}
TEST_F(CudaStreamTest, TestSyncNoThrow)
{
rmm::cuda_stream stream_a;
EXPECT_NO_THROW(stream_a.synchronize_no_throw());
}
#ifndef NDEBUG
using CudaStreamDeathTest = CudaStreamTest;
TEST_F(CudaStreamDeathTest, TestSyncNoThrow)
{
auto test = []() {
rmm::cuda_stream stream_a;
cudaStreamDestroy(static_cast<cudaStream_t>(stream_a));
// should assert here or in `~cuda_stream()`
stream_a.synchronize_no_throw();
};
EXPECT_DEATH(test(), "Assertion");
}
#endif
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/cuda_stream_pool_tests.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/device_uvector.hpp>
#include <gtest/gtest.h>
#include <cuda_runtime_api.h>
struct CudaStreamPoolTest : public ::testing::Test {
rmm::cuda_stream_pool pool{};
};
TEST_F(CudaStreamPoolTest, ZeroSizePoolException)
{
EXPECT_THROW(rmm::cuda_stream_pool pool{0}, rmm::logic_error);
}
TEST_F(CudaStreamPoolTest, Unequal)
{
auto const stream_a = this->pool.get_stream();
auto const stream_b = this->pool.get_stream();
EXPECT_NE(stream_a, stream_b);
}
TEST_F(CudaStreamPoolTest, Nondefault)
{
auto const stream_a = this->pool.get_stream();
// pool streams are explicit, non-default streams
EXPECT_FALSE(stream_a.is_default());
EXPECT_FALSE(stream_a.is_per_thread_default());
}
TEST_F(CudaStreamPoolTest, ValidStreams)
{
auto const stream_a = this->pool.get_stream();
auto const stream_b = this->pool.get_stream();
// Operations on the streams should work correctly and without throwing exceptions
auto constexpr vector_size{100};
auto vec1 = rmm::device_uvector<std::uint8_t>{vector_size, stream_a};
RMM_CUDA_TRY(cudaMemsetAsync(vec1.data(), 0xcc, 100, stream_a.value()));
stream_a.synchronize();
auto vec2 = rmm::device_uvector<std::uint8_t>{vec1, stream_b};
auto element = vec2.front_element(stream_b);
EXPECT_EQ(element, 0xcc);
}
TEST_F(CudaStreamPoolTest, PoolSize) { EXPECT_GE(this->pool.get_pool_size(), 1); }
TEST_F(CudaStreamPoolTest, OutOfBoundLinearAccess)
{
auto const stream_a = this->pool.get_stream(0);
auto const stream_b = this->pool.get_stream(this->pool.get_pool_size());
EXPECT_EQ(stream_a, stream_b);
}
TEST_F(CudaStreamPoolTest, ValidLinearAccess)
{
auto const stream_a = this->pool.get_stream(0);
auto const stream_b = this->pool.get_stream(1);
EXPECT_NE(stream_a, stream_b);
}
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/device_uvector_tests.cpp
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <gtest/internal/gtest-type-util.h>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
// explicit instantiation for test coverage purposes.
template class rmm::device_uvector<int32_t>;
template <typename T>
struct TypedUVectorTest : ::testing::Test {
[[nodiscard]] rmm::cuda_stream_view stream() const noexcept { return rmm::cuda_stream_view{}; }
};
using TestTypes = ::testing::Types<int8_t, int32_t, uint64_t, float, double>;
using async_resource_ref = cuda::mr::async_resource_ref<cuda::mr::device_accessible>;
TYPED_TEST_CASE(TypedUVectorTest, TestTypes);
TYPED_TEST(TypedUVectorTest, MemoryResource)
{
rmm::device_uvector<TypeParam> vec(128, this->stream());
EXPECT_EQ(vec.memory_resource(), async_resource_ref{rmm::mr::get_current_device_resource()});
}
TYPED_TEST(TypedUVectorTest, ZeroSizeConstructor)
{
rmm::device_uvector<TypeParam> vec(0, this->stream());
EXPECT_EQ(vec.size(), 0);
EXPECT_EQ(vec.end(), vec.begin());
EXPECT_TRUE(vec.is_empty());
}
TYPED_TEST(TypedUVectorTest, NonZeroSizeConstructor)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
EXPECT_EQ(vec.size(), size);
EXPECT_EQ(vec.ssize(), size);
EXPECT_NE(vec.data(), nullptr);
EXPECT_EQ(vec.end(), vec.begin() + vec.size());
EXPECT_FALSE(vec.is_empty());
EXPECT_NE(vec.element_ptr(0), nullptr);
}
TYPED_TEST(TypedUVectorTest, CopyConstructor)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
rmm::device_uvector<TypeParam> uv_copy(vec, this->stream());
EXPECT_EQ(uv_copy.size(), vec.size());
EXPECT_NE(uv_copy.data(), nullptr);
EXPECT_EQ(uv_copy.end(), uv_copy.begin() + uv_copy.size());
EXPECT_FALSE(uv_copy.is_empty());
EXPECT_NE(uv_copy.element_ptr(0), nullptr);
}
TYPED_TEST(TypedUVectorTest, ResizeSmaller)
{
auto const original_size{12345};
rmm::device_uvector<TypeParam> vec(original_size, this->stream());
auto* original_data = vec.data();
auto* original_begin = vec.begin();
auto smaller_size = vec.size() - 1;
vec.resize(smaller_size, this->stream());
EXPECT_EQ(original_data, vec.data());
EXPECT_EQ(original_begin, vec.begin());
EXPECT_EQ(vec.size(), smaller_size);
EXPECT_EQ(vec.capacity(), original_size);
// shrink_to_fit should force a new allocation
vec.shrink_to_fit(this->stream());
EXPECT_EQ(vec.size(), smaller_size);
EXPECT_EQ(vec.capacity(), smaller_size);
}
TYPED_TEST(TypedUVectorTest, ResizeLarger)
{
auto const original_size{12345};
rmm::device_uvector<TypeParam> vec(original_size, this->stream());
auto* original_data = vec.data();
auto* original_begin = vec.begin();
auto larger_size = vec.size() + 1;
vec.resize(larger_size, this->stream());
EXPECT_NE(vec.data(), original_data);
EXPECT_NE(vec.begin(), original_begin);
EXPECT_EQ(vec.size(), larger_size);
EXPECT_EQ(vec.capacity(), larger_size);
auto* larger_data = vec.data();
auto* larger_begin = vec.begin();
// shrink_to_fit shouldn't have any effect
vec.shrink_to_fit(this->stream());
EXPECT_EQ(vec.size(), larger_size);
EXPECT_EQ(vec.capacity(), larger_size);
EXPECT_EQ(vec.data(), larger_data);
EXPECT_EQ(vec.begin(), larger_begin);
}
TYPED_TEST(TypedUVectorTest, ReserveSmaller)
{
auto const original_size{12345};
rmm::device_uvector<TypeParam> vec(original_size, this->stream());
auto* const original_data = vec.data();
auto* const original_begin = vec.begin();
auto const original_capacity = vec.capacity();
auto const smaller_capacity = vec.capacity() - 1;
vec.reserve(smaller_capacity, this->stream());
EXPECT_EQ(vec.data(), original_data);
EXPECT_EQ(vec.begin(), original_begin);
EXPECT_EQ(vec.size(), original_size);
EXPECT_EQ(vec.capacity(), original_capacity);
}
TYPED_TEST(TypedUVectorTest, ReserveLarger)
{
auto const original_size{12345};
rmm::device_uvector<TypeParam> vec(original_size, this->stream());
vec.set_element(0, 1, this->stream());
auto* const original_data = vec.data();
auto* const original_begin = vec.begin();
auto const larger_capacity = vec.capacity() + 1;
vec.reserve(larger_capacity, this->stream());
EXPECT_NE(vec.data(), original_data);
EXPECT_NE(vec.begin(), original_begin);
EXPECT_EQ(vec.size(), original_size);
EXPECT_EQ(vec.capacity(), larger_capacity);
// The element should be copied
EXPECT_EQ(vec.element(0, this->stream()), 1);
}
TYPED_TEST(TypedUVectorTest, ResizeToZero)
{
auto const original_size{12345};
rmm::device_uvector<TypeParam> vec(original_size, this->stream());
vec.resize(0, this->stream());
EXPECT_EQ(vec.size(), 0);
EXPECT_TRUE(vec.is_empty());
EXPECT_EQ(vec.capacity(), original_size);
vec.shrink_to_fit(this->stream());
EXPECT_EQ(vec.capacity(), 0);
}
TYPED_TEST(TypedUVectorTest, Release)
{
auto const original_size{12345};
rmm::device_uvector<TypeParam> vec(original_size, this->stream());
auto* original_data = vec.data();
rmm::device_buffer storage = vec.release();
EXPECT_EQ(vec.size(), 0);
EXPECT_EQ(vec.capacity(), 0);
EXPECT_TRUE(vec.is_empty());
EXPECT_EQ(storage.data(), original_data);
EXPECT_EQ(storage.size(), original_size * sizeof(TypeParam));
}
TYPED_TEST(TypedUVectorTest, ElementPointer)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
for (std::size_t i = 0; i < vec.size(); ++i) {
EXPECT_NE(vec.element_ptr(i), nullptr);
}
}
TYPED_TEST(TypedUVectorTest, OOBSetElement)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
EXPECT_THROW(vec.set_element(vec.size() + 1, 42, this->stream()), rmm::out_of_range);
}
TYPED_TEST(TypedUVectorTest, OOBGetElement)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
// avoid error due to nodiscard function
auto foo = [&]() { return vec.element(vec.size() + 1, this->stream()); };
EXPECT_THROW(foo(), rmm::out_of_range);
}
TYPED_TEST(TypedUVectorTest, GetSetElement)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
for (std::size_t i = 0; i < vec.size(); ++i) {
vec.set_element(i, i, this->stream());
EXPECT_EQ(static_cast<TypeParam>(i), vec.element(i, this->stream()));
}
}
TYPED_TEST(TypedUVectorTest, GetSetElementAsync)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
for (std::size_t i = 0; i < vec.size(); ++i) {
auto init = static_cast<TypeParam>(i);
vec.set_element_async(i, init, this->stream());
EXPECT_EQ(init, vec.element(i, this->stream()));
}
}
TYPED_TEST(TypedUVectorTest, SetElementZeroAsync)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
for (std::size_t i = 0; i < vec.size(); ++i) {
vec.set_element_to_zero_async(i, this->stream());
EXPECT_EQ(TypeParam{0}, vec.element(i, this->stream()));
}
}
TYPED_TEST(TypedUVectorTest, FrontBackElement)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
auto const first = TypeParam{42};
auto const last = TypeParam{13};
vec.set_element(0, first, this->stream());
vec.set_element(vec.size() - 1, last, this->stream());
EXPECT_EQ(first, vec.front_element(this->stream()));
EXPECT_EQ(last, vec.back_element(this->stream()));
}
TYPED_TEST(TypedUVectorTest, SetGetStream)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
EXPECT_EQ(vec.stream(), this->stream());
rmm::cuda_stream_view const otherstream{cudaStreamPerThread};
vec.set_stream(otherstream);
EXPECT_EQ(vec.stream(), otherstream);
}
TYPED_TEST(TypedUVectorTest, Iterators)
{
auto const size{12345};
rmm::device_uvector<TypeParam> vec(size, this->stream());
EXPECT_EQ(vec.begin(), vec.data());
EXPECT_EQ(vec.cbegin(), vec.data());
auto const* const_begin = std::as_const(vec).begin();
EXPECT_EQ(const_begin, vec.cbegin());
EXPECT_EQ(std::distance(vec.begin(), vec.end()), vec.size());
EXPECT_EQ(std::distance(vec.cbegin(), vec.cend()), vec.size());
auto const* const_end = std::as_const(vec).end();
EXPECT_EQ(const_end, vec.cend());
}
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/logger_tests.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "./byte_literals.hpp"
#include <benchmarks/utilities/log_parser.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/logging_resource_adaptor.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <cstdlib>
#include <filesystem>
#include <thread>
namespace rmm::test {
namespace {
class raii_restore_env {
public:
raii_restore_env(char const* name) : name_(name)
{
auto* const value_or_null = getenv(name);
if (value_or_null != nullptr) {
value_ = value_or_null;
is_set_ = true;
}
}
~raii_restore_env()
{
if (is_set_) {
setenv(name_.c_str(), value_.c_str(), 1);
} else {
unsetenv(name_.c_str());
}
}
raii_restore_env(raii_restore_env const&) = default;
raii_restore_env& operator=(raii_restore_env const&) = default;
raii_restore_env(raii_restore_env&&) = default;
raii_restore_env& operator=(raii_restore_env&&) = default;
private:
std::string name_{};
std::string value_{};
bool is_set_{false};
};
class raii_temp_directory {
public:
raii_temp_directory()
{
std::string random_path{std::filesystem::temp_directory_path().string()};
random_path += "/rmm_XXXXXX";
auto const ptr = mkdtemp(const_cast<char*>(random_path.data()));
EXPECT_TRUE((ptr != nullptr));
directory_path_ = std::filesystem::path{random_path};
}
~raii_temp_directory() { std::filesystem::remove_all(directory_path_); }
raii_temp_directory& operator=(raii_temp_directory const&) = delete;
raii_temp_directory(raii_temp_directory const&) = delete;
[[nodiscard]] std::string generate_path(std::string filename) const
{
return directory_path_ / filename;
}
private:
std::filesystem::path directory_path_{};
};
/**
* @brief Verifies the specified log file contains the expected events.
*
* Events in the log file are expected to occur in the same order as in `expected_events`.
*
* @note: This function accounts for the fact that `device_memory_resource` automatically pads
* allocations to a multiple of 8 bytes by rounding up the expected allocation sizes to a multiple
* of 8.
*
* @param filename Name of CSV log file generated from `logging_resource_adaptor`
* @param expected_events List of expected (de)allocation events
*/
void expect_log_events(std::string const& filename,
std::vector<rmm::detail::event> const& expected_events)
{
auto actual_events = rmm::detail::parse_csv(filename);
std::equal(expected_events.begin(),
expected_events.end(),
actual_events.begin(),
[](auto expected, auto actual) {
// We don't test the logged thread id since it may be different from what we record.
// The actual value doesn't matter so long as events from different threads have
// different ids
// EXPECT_EQ(expected.thread_id, actual.thread_id);
// EXPECT_EQ(expected.stream, actual.stream);
EXPECT_EQ(expected.act, actual.act);
// device_memory_resource automatically pads an allocation to a multiple of 8 bytes
EXPECT_EQ(expected.size, actual.size);
EXPECT_EQ(expected.pointer, actual.pointer);
return true;
});
}
TEST(Adaptor, FilenameConstructor)
{
raii_temp_directory temp_dir;
std::string filename{temp_dir.generate_path("test.txt")};
rmm::mr::cuda_memory_resource upstream;
rmm::mr::logging_resource_adaptor<rmm::mr::cuda_memory_resource> log_mr{&upstream, filename};
auto const size0{100};
auto const size1{42};
auto* ptr0 = log_mr.allocate(size0);
auto* ptr1 = log_mr.allocate(size1);
log_mr.deallocate(ptr0, size0);
log_mr.deallocate(ptr1, size1);
log_mr.flush();
using rmm::detail::action;
using rmm::detail::event;
std::vector<event> expected_events{{action::ALLOCATE, size0, ptr0},
{action::ALLOCATE, size1, ptr1},
{action::FREE, size0, ptr0},
{action::FREE, size1, ptr1}};
expect_log_events(filename, expected_events);
}
TEST(Adaptor, MultiSinkConstructor)
{
raii_temp_directory temp_dir;
std::string filename1{temp_dir.generate_path("test_multi_1.txt")};
std::string filename2{temp_dir.generate_path("test_multi_2.txt")};
rmm::mr::cuda_memory_resource upstream;
auto file_sink1 = std::make_shared<spdlog::sinks::basic_file_sink_mt>(filename1, true);
auto file_sink2 = std::make_shared<spdlog::sinks::basic_file_sink_mt>(filename2, true);
rmm::mr::logging_resource_adaptor<rmm::mr::cuda_memory_resource> log_mr{&upstream,
{file_sink1, file_sink2}};
auto const size0{100};
auto const size1{42};
auto* ptr0 = log_mr.allocate(size0);
auto* ptr1 = log_mr.allocate(size1);
log_mr.deallocate(ptr0, size0);
log_mr.deallocate(ptr1, size1);
log_mr.flush();
using rmm::detail::action;
using rmm::detail::event;
std::vector<event> expected_events{{action::ALLOCATE, size0, ptr0},
{action::ALLOCATE, size1, ptr1},
{action::FREE, size0, ptr0},
{action::FREE, size1, ptr1}};
expect_log_events(filename1, expected_events);
expect_log_events(filename2, expected_events);
}
TEST(Adaptor, Factory)
{
raii_temp_directory temp_dir;
std::string filename{temp_dir.generate_path("test.txt")};
rmm::mr::cuda_memory_resource upstream;
auto log_mr = rmm::mr::make_logging_adaptor(&upstream, filename);
auto const size0{99};
auto const size1{42};
auto* ptr0 = log_mr.allocate(size0);
log_mr.deallocate(ptr0, size0);
auto* ptr1 = log_mr.allocate(size1);
log_mr.deallocate(ptr1, size1);
log_mr.flush();
using rmm::detail::action;
using rmm::detail::event;
std::vector<event> expected_events{{action::ALLOCATE, size0, ptr0},
{action::FREE, size0, ptr0},
{action::ALLOCATE, size1, ptr1},
{action::FREE, size1, ptr1}};
expect_log_events(filename, expected_events);
}
TEST(Adaptor, EnvironmentPath)
{
raii_temp_directory temp_dir;
rmm::mr::cuda_memory_resource upstream;
// restore the original value (or unset) after test
raii_restore_env old_env("RMM_LOG_FILE");
unsetenv("RMM_LOG_FILE");
// expect logging adaptor to fail if RMM_LOG_FILE is unset
EXPECT_THROW(rmm::mr::make_logging_adaptor(&upstream), rmm::logic_error);
std::string filename{temp_dir.generate_path("test.txt")};
setenv("RMM_LOG_FILE", filename.c_str(), 1);
// use log file location specified in environment variable RMM_LOG_FILE
auto log_mr = rmm::mr::make_logging_adaptor(&upstream);
auto const size{100};
auto* ptr = log_mr.allocate(size);
log_mr.deallocate(ptr, size);
log_mr.flush();
using rmm::detail::action;
using rmm::detail::event;
std::vector<event> expected_events{
{action::ALLOCATE, size, ptr},
{action::FREE, size, ptr},
};
expect_log_events(filename, expected_events);
}
TEST(Adaptor, AllocateFailure)
{
raii_temp_directory temp_dir;
std::string filename{temp_dir.generate_path("failure.txt")};
rmm::mr::cuda_memory_resource upstream;
auto log_mr = rmm::mr::make_logging_adaptor(&upstream, filename);
auto const size0{99};
auto const size1{1_TiB};
auto* ptr0 = log_mr.allocate(size0);
log_mr.deallocate(ptr0, size0);
try {
log_mr.allocate(size1);
} catch (...) {
}
log_mr.flush();
using rmm::detail::action;
using rmm::detail::event;
std::vector<event> expected_events{{action::ALLOCATE, size0, ptr0},
{action::FREE, size0, ptr0},
{action::ALLOCATE_FAILURE, size1, nullptr}};
expect_log_events(filename, expected_events);
}
TEST(Adaptor, STDOUT)
{
testing::internal::CaptureStdout();
rmm::mr::cuda_memory_resource upstream;
auto log_mr = rmm::mr::make_logging_adaptor(&upstream, std::cout);
auto const size{100};
auto* ptr = log_mr.allocate(size);
log_mr.deallocate(ptr, size);
std::string output = testing::internal::GetCapturedStdout();
std::string header = output.substr(0, output.find('\n'));
ASSERT_EQ(header, log_mr.header());
}
TEST(Adaptor, STDERR)
{
testing::internal::CaptureStderr();
rmm::mr::cuda_memory_resource upstream;
auto log_mr = rmm::mr::make_logging_adaptor(&upstream, std::cerr);
auto const size{100};
auto* ptr = log_mr.allocate(size);
log_mr.deallocate(ptr, size);
std::string output = testing::internal::GetCapturedStderr();
std::string header = output.substr(0, output.find('\n'));
ASSERT_EQ(header, log_mr.header());
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/device_check_resource_adaptor.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/cuda_device.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <gtest/gtest.h>
class device_check_resource_adaptor final : public rmm::mr::device_memory_resource {
public:
device_check_resource_adaptor(rmm::mr::device_memory_resource* upstream)
: device_id{rmm::get_current_cuda_device()}, upstream_(upstream)
{
}
[[nodiscard]] bool supports_streams() const noexcept override
{
return upstream_->supports_streams();
}
[[nodiscard]] bool supports_get_mem_info() const noexcept override
{
return upstream_->supports_get_mem_info();
}
[[nodiscard]] device_memory_resource* get_upstream() const noexcept { return upstream_; }
private:
[[nodiscard]] bool check_device_id() const { return device_id == rmm::get_current_cuda_device(); }
void* do_allocate(std::size_t bytes, rmm::cuda_stream_view stream) override
{
bool const is_correct_device = check_device_id();
EXPECT_TRUE(is_correct_device);
if (is_correct_device) { return upstream_->allocate(bytes, stream); }
return nullptr;
}
void do_deallocate(void* ptr, std::size_t bytes, rmm::cuda_stream_view stream) override
{
bool const is_correct_device = check_device_id();
EXPECT_TRUE(is_correct_device);
if (is_correct_device) { upstream_->deallocate(ptr, bytes, stream); }
}
[[nodiscard]] bool do_is_equal(
rmm::mr::device_memory_resource const& other) const noexcept override
{
if (this == &other) { return true; }
auto const* cast = dynamic_cast<device_check_resource_adaptor const*>(&other);
if (cast != nullptr) { return upstream_->is_equal(*cast->get_upstream()); }
return upstream_->is_equal(other);
}
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
rmm::cuda_stream_view stream) const override
{
return upstream_->get_mem_info(stream);
}
rmm::cuda_device_id device_id;
rmm::mr::device_memory_resource* upstream_{};
};
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/tests/mock_resource.hpp
|
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/mr/device/device_memory_resource.hpp>
#include <gmock/gmock.h>
namespace rmm::test {
class mock_resource : public rmm::mr::device_memory_resource {
public:
MOCK_METHOD(bool, supports_streams, (), (const, override, noexcept));
MOCK_METHOD(bool, supports_get_mem_info, (), (const, override, noexcept));
MOCK_METHOD(void*, do_allocate, (std::size_t, cuda_stream_view), (override));
MOCK_METHOD(void, do_deallocate, (void*, std::size_t, cuda_stream_view), (override));
using size_pair = std::pair<std::size_t, std::size_t>;
MOCK_METHOD(size_pair, do_get_mem_info, (cuda_stream_view), (const, override));
};
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/host/pinned_pool_mr_tests.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/aligned.hpp>
#include <rmm/detail/cuda_util.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <rmm/mr/host/pinned_memory_resource.hpp>
#include <gtest/gtest.h>
// explicit instantiation for test coverage purposes
template class rmm::mr::pool_memory_resource<rmm::mr::pinned_memory_resource>;
namespace rmm::test {
namespace {
using pool_mr = rmm::mr::pool_memory_resource<rmm::mr::pinned_memory_resource>;
TEST(PinnedPoolTest, ThrowOnNullUpstream)
{
auto construct_nullptr = []() { pool_mr mr{nullptr}; };
EXPECT_THROW(construct_nullptr(), rmm::logic_error);
}
TEST(PinnedPoolTest, ThrowMaxLessThanInitial)
{
// Make sure first argument is enough larger than the second that alignment rounding doesn't
// make them equal
auto max_less_than_initial = []() {
rmm::mr::pinned_memory_resource pinned_mr{};
const auto initial{1024};
const auto maximum{256};
pool_mr mr{&pinned_mr, initial, maximum};
};
EXPECT_THROW(max_less_than_initial(), rmm::logic_error);
}
TEST(PinnedPoolTest, ReferenceThrowMaxLessThanInitial)
{
// Make sure first argument is enough larger than the second that alignment rounding doesn't
// make them equal
auto max_less_than_initial = []() {
rmm::mr::pinned_memory_resource pinned_mr{};
const auto initial{1024};
const auto maximum{256};
pool_mr mr{pinned_mr, initial, maximum};
};
EXPECT_THROW(max_less_than_initial(), rmm::logic_error);
}
// Issue #527
TEST(PinnedPoolTest, InitialAndMaxPoolSizeEqual)
{
EXPECT_NO_THROW([]() {
rmm::mr::pinned_memory_resource pinned_mr{};
pool_mr mr(pinned_mr, 1000192, 1000192);
mr.allocate(1000);
}());
}
TEST(PinnedPoolTest, NonAlignedPoolSize)
{
EXPECT_THROW(
[]() {
rmm::mr::pinned_memory_resource pinned_mr{};
pool_mr mr(pinned_mr, 1000031, 1000192);
mr.allocate(1000);
}(),
rmm::logic_error);
EXPECT_THROW(
[]() {
rmm::mr::pinned_memory_resource pinned_mr{};
pool_mr mr(pinned_mr, 1000192, 1000200);
mr.allocate(1000);
}(),
rmm::logic_error);
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/host/mr_ref_tests.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../byte_literals.hpp"
#include <rmm/detail/aligned.hpp>
#include <rmm/mr/host/host_memory_resource.hpp>
#include <rmm/mr/host/new_delete_resource.hpp>
#include <rmm/mr/host/pinned_memory_resource.hpp>
#include <cuda_runtime_api.h>
#include <cuda/memory_resource>
#include <gtest/gtest.h>
#include <cstddef>
#include <deque>
#include <random>
namespace rmm::test {
namespace {
inline bool is_aligned(void* ptr, std::size_t alignment = alignof(std::max_align_t))
{
return rmm::detail::is_pointer_aligned(ptr, alignment);
}
// Returns true if a pointer points to a device memory or managed memory allocation.
inline bool is_device_memory(void* ptr)
{
cudaPointerAttributes attributes{};
if (cudaSuccess != cudaPointerGetAttributes(&attributes, ptr)) { return false; }
return (attributes.type == cudaMemoryTypeDevice) or (attributes.type == cudaMemoryTypeManaged);
}
/**
* @brief Returns if a pointer `p` points to pinned host memory.
*/
inline bool is_pinned_memory(void* ptr)
{
cudaPointerAttributes attributes{};
if (cudaSuccess != cudaPointerGetAttributes(&attributes, ptr)) { return false; }
return attributes.type == cudaMemoryTypeHost;
}
constexpr std::size_t size_word{4_B};
constexpr std::size_t size_kb{1_KiB};
constexpr std::size_t size_mb{1_MiB};
constexpr std::size_t size_gb{1_GiB};
constexpr std::size_t size_pb{1_PiB};
struct allocation {
void* ptr{nullptr};
std::size_t size{0};
allocation(void* ptr, std::size_t size) : ptr{ptr}, size{size} {}
allocation() = default;
};
} // namespace
template <typename MemoryResourceType>
struct MRRefTest : public ::testing::Test {
MemoryResourceType mr;
cuda::mr::resource_ref<cuda::mr::host_accessible> ref;
MRRefTest() : mr{}, ref{mr} {}
};
using resources = ::testing::Types<rmm::mr::new_delete_resource, rmm::mr::pinned_memory_resource>;
static_assert(cuda::mr::resource_with<rmm::mr::new_delete_resource, cuda::mr::host_accessible>);
static_assert(cuda::mr::resource_with<rmm::mr::pinned_memory_resource, cuda::mr::host_accessible>);
TYPED_TEST_CASE(MRRefTest, resources);
TYPED_TEST(MRRefTest, SelfEquality) { EXPECT_TRUE(this->ref == this->ref); }
TYPED_TEST(MRRefTest, AllocateZeroBytes)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->ref.allocate(0));
EXPECT_NO_THROW(this->ref.deallocate(ptr, 0));
}
TYPED_TEST(MRRefTest, AllocateWord)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->ref.allocate(size_word));
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(is_aligned(ptr));
EXPECT_FALSE(is_device_memory(ptr));
EXPECT_NO_THROW(this->ref.deallocate(ptr, size_word));
}
TYPED_TEST(MRRefTest, AllocateKB)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->ref.allocate(size_kb));
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(is_aligned(ptr));
EXPECT_FALSE(is_device_memory(ptr));
EXPECT_NO_THROW(this->ref.deallocate(ptr, size_kb));
}
TYPED_TEST(MRRefTest, AllocateMB)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->ref.allocate(size_mb));
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(is_aligned(ptr));
EXPECT_FALSE(is_device_memory(ptr));
EXPECT_NO_THROW(this->ref.deallocate(ptr, size_mb));
}
TYPED_TEST(MRRefTest, AllocateGB)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->ref.allocate(size_gb));
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(is_aligned(ptr));
EXPECT_FALSE(is_device_memory(ptr));
EXPECT_NO_THROW(this->ref.deallocate(ptr, size_gb));
}
TYPED_TEST(MRRefTest, AllocateTooMuch)
{
void* ptr{nullptr};
EXPECT_THROW(ptr = this->ref.allocate(size_pb), std::bad_alloc);
EXPECT_EQ(nullptr, ptr);
}
TYPED_TEST(MRRefTest, RandomAllocations)
{
constexpr std::size_t num_allocations{100};
std::vector<allocation> allocations(num_allocations);
constexpr std::size_t MAX_ALLOCATION_SIZE{5 * size_mb};
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> distribution(1, MAX_ALLOCATION_SIZE);
// 100 allocations from [0,5MB)
std::for_each(
allocations.begin(), allocations.end(), [&generator, &distribution, this](allocation& alloc) {
alloc.size = distribution(generator);
EXPECT_NO_THROW(alloc.ptr = this->ref.allocate(alloc.size));
EXPECT_NE(nullptr, alloc.ptr);
EXPECT_TRUE(is_aligned(alloc.ptr));
});
std::for_each(allocations.begin(), allocations.end(), [this](allocation& alloc) {
EXPECT_NO_THROW(this->ref.deallocate(alloc.ptr, alloc.size));
});
}
TYPED_TEST(MRRefTest, MixedRandomAllocationFree)
{
std::default_random_engine generator;
constexpr std::size_t MAX_ALLOCATION_SIZE{10 * size_mb};
std::uniform_int_distribution<std::size_t> size_distribution(1, MAX_ALLOCATION_SIZE);
// How often a free will occur. For example, if `1`, then every allocation
// will immediately be free'd. Or, if 4, on average, a free will occur after
// every 4th allocation
constexpr std::size_t FREE_FREQUENCY{4};
std::uniform_int_distribution<int> free_distribution(1, FREE_FREQUENCY);
std::deque<allocation> allocations;
constexpr std::size_t num_allocations{100};
for (std::size_t i = 0; i < num_allocations; ++i) {
std::size_t allocation_size = size_distribution(generator);
EXPECT_NO_THROW(allocations.emplace_back(this->ref.allocate(allocation_size), allocation_size));
auto new_allocation = allocations.back();
EXPECT_NE(nullptr, new_allocation.ptr);
EXPECT_TRUE(is_aligned(new_allocation.ptr));
bool const free_front{free_distribution(generator) == free_distribution.max()};
if (free_front) {
auto front = allocations.front();
EXPECT_NO_THROW(this->ref.deallocate(front.ptr, front.size));
allocations.pop_front();
}
}
// free any remaining allocations
for (auto alloc : allocations) {
EXPECT_NO_THROW(this->ref.deallocate(alloc.ptr, alloc.size));
allocations.pop_front();
}
}
static constexpr std::size_t MinTestedAlignment{16};
static constexpr std::size_t MaxTestedAlignment{4096};
static constexpr std::size_t TestedAlignmentMultiplier{2};
static constexpr std::size_t NUM_TRIALS{100};
TYPED_TEST(MRRefTest, AlignmentTest)
{
std::default_random_engine generator(0);
constexpr std::size_t MAX_ALLOCATION_SIZE{10 * size_mb};
std::uniform_int_distribution<std::size_t> size_distribution(1, MAX_ALLOCATION_SIZE);
for (std::size_t num_trials = 0; num_trials < NUM_TRIALS; ++num_trials) {
for (std::size_t alignment = MinTestedAlignment; alignment <= MaxTestedAlignment;
alignment *= TestedAlignmentMultiplier) {
auto allocation_size = size_distribution(generator);
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->ref.allocate(allocation_size, alignment));
EXPECT_TRUE(is_aligned(ptr, alignment));
EXPECT_NO_THROW(this->ref.deallocate(ptr, allocation_size, alignment));
}
}
}
TYPED_TEST(MRRefTest, UnsupportedAlignmentTest)
{
std::default_random_engine generator(0);
constexpr std::size_t MAX_ALLOCATION_SIZE{10 * size_mb};
std::uniform_int_distribution<std::size_t> size_distribution(1, MAX_ALLOCATION_SIZE);
for (std::size_t num_trials = 0; num_trials < NUM_TRIALS; ++num_trials) {
for (std::size_t alignment = MinTestedAlignment; alignment <= MaxTestedAlignment;
alignment *= TestedAlignmentMultiplier) {
auto allocation_size = size_distribution(generator);
void* ptr{nullptr};
// An unsupported alignment (like an odd number) should result in an
// alignment of `alignof(std::max_align_t)`
auto const bad_alignment = alignment + 1;
EXPECT_NO_THROW(ptr = this->ref.allocate(allocation_size, bad_alignment));
EXPECT_TRUE(is_aligned(ptr, alignof(std::max_align_t)));
EXPECT_NO_THROW(this->ref.deallocate(ptr, allocation_size, bad_alignment));
}
}
}
TEST(PinnedResource, isPinned)
{
rmm::mr::pinned_memory_resource mr;
cuda::mr::resource_ref<cuda::mr::host_accessible> ref{mr};
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = ref.allocate(100));
EXPECT_TRUE(is_pinned_memory(ptr));
EXPECT_NO_THROW(ref.deallocate(ptr, 100));
}
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/host/mr_tests.cpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../byte_literals.hpp"
#include <rmm/detail/aligned.hpp>
#include <rmm/mr/host/host_memory_resource.hpp>
#include <rmm/mr/host/new_delete_resource.hpp>
#include <rmm/mr/host/pinned_memory_resource.hpp>
#include <cuda_runtime_api.h>
#include <cuda/memory_resource>
#include <gtest/gtest.h>
#include <cstddef>
#include <deque>
#include <random>
namespace rmm::test {
namespace {
inline bool is_aligned(void* ptr, std::size_t alignment = alignof(std::max_align_t))
{
return rmm::detail::is_pointer_aligned(ptr, alignment);
}
// Returns true if a pointer points to a device memory or managed memory allocation.
inline bool is_device_memory(void* ptr)
{
cudaPointerAttributes attributes{};
if (cudaSuccess != cudaPointerGetAttributes(&attributes, ptr)) { return false; }
return (attributes.type == cudaMemoryTypeDevice) or (attributes.type == cudaMemoryTypeManaged);
}
/**
* @brief Returns if a pointer `p` points to pinned host memory.
*/
inline bool is_pinned_memory(void* ptr)
{
cudaPointerAttributes attributes{};
if (cudaSuccess != cudaPointerGetAttributes(&attributes, ptr)) { return false; }
return attributes.type == cudaMemoryTypeHost;
}
constexpr std::size_t size_word{4_B};
constexpr std::size_t size_kb{1_KiB};
constexpr std::size_t size_mb{1_MiB};
constexpr std::size_t size_gb{1_GiB};
constexpr std::size_t size_pb{1_PiB};
struct allocation {
void* ptr{nullptr};
std::size_t size{0};
allocation(void* ptr, std::size_t size) : ptr{ptr}, size{size} {}
allocation() = default;
};
} // namespace
template <typename MemoryResourceType>
struct MRTest : public ::testing::Test {
std::unique_ptr<rmm::mr::host_memory_resource> mr;
MRTest() : mr{new MemoryResourceType} {}
};
using resources = ::testing::Types<rmm::mr::new_delete_resource, rmm::mr::pinned_memory_resource>;
static_assert(cuda::mr::resource_with<rmm::mr::new_delete_resource, cuda::mr::host_accessible>);
static_assert(cuda::mr::resource_with<rmm::mr::pinned_memory_resource, cuda::mr::host_accessible>);
TYPED_TEST_CASE(MRTest, resources);
TYPED_TEST(MRTest, SelfEquality) { EXPECT_TRUE(this->mr->is_equal(*this->mr)); }
TYPED_TEST(MRTest, AllocateZeroBytes)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->mr->allocate(0));
EXPECT_NO_THROW(this->mr->deallocate(ptr, 0));
}
TYPED_TEST(MRTest, AllocateWord)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->mr->allocate(size_word));
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(is_aligned(ptr));
EXPECT_FALSE(is_device_memory(ptr));
EXPECT_NO_THROW(this->mr->deallocate(ptr, size_word));
}
TYPED_TEST(MRTest, AllocateKB)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->mr->allocate(size_kb));
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(is_aligned(ptr));
EXPECT_FALSE(is_device_memory(ptr));
EXPECT_NO_THROW(this->mr->deallocate(ptr, size_kb));
}
TYPED_TEST(MRTest, AllocateMB)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->mr->allocate(size_mb));
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(is_aligned(ptr));
EXPECT_FALSE(is_device_memory(ptr));
EXPECT_NO_THROW(this->mr->deallocate(ptr, size_mb));
}
TYPED_TEST(MRTest, AllocateGB)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->mr->allocate(size_gb));
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(is_aligned(ptr));
EXPECT_FALSE(is_device_memory(ptr));
EXPECT_NO_THROW(this->mr->deallocate(ptr, size_gb));
}
TYPED_TEST(MRTest, AllocateTooMuch)
{
void* ptr{nullptr};
EXPECT_THROW(ptr = this->mr->allocate(size_pb), std::bad_alloc);
EXPECT_EQ(nullptr, ptr);
}
TYPED_TEST(MRTest, RandomAllocations)
{
constexpr std::size_t num_allocations{100};
std::vector<allocation> allocations(num_allocations);
constexpr std::size_t MAX_ALLOCATION_SIZE{5 * size_mb};
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> distribution(1, MAX_ALLOCATION_SIZE);
// 100 allocations from [0,5MB)
std::for_each(
allocations.begin(), allocations.end(), [&generator, &distribution, this](allocation& alloc) {
alloc.size = distribution(generator);
EXPECT_NO_THROW(alloc.ptr = this->mr->allocate(alloc.size));
EXPECT_NE(nullptr, alloc.ptr);
EXPECT_TRUE(is_aligned(alloc.ptr));
});
std::for_each(allocations.begin(), allocations.end(), [this](allocation& alloc) {
EXPECT_NO_THROW(this->mr->deallocate(alloc.ptr, alloc.size));
});
}
TYPED_TEST(MRTest, MixedRandomAllocationFree)
{
std::default_random_engine generator;
constexpr std::size_t MAX_ALLOCATION_SIZE{10 * size_mb};
std::uniform_int_distribution<std::size_t> size_distribution(1, MAX_ALLOCATION_SIZE);
// How often a free will occur. For example, if `1`, then every allocation
// will immediately be free'd. Or, if 4, on average, a free will occur after
// every 4th allocation
constexpr std::size_t FREE_FREQUENCY{4};
std::uniform_int_distribution<int> free_distribution(1, FREE_FREQUENCY);
std::deque<allocation> allocations;
constexpr std::size_t num_allocations{100};
for (std::size_t i = 0; i < num_allocations; ++i) {
std::size_t allocation_size = size_distribution(generator);
EXPECT_NO_THROW(allocations.emplace_back(this->mr->allocate(allocation_size), allocation_size));
auto new_allocation = allocations.back();
EXPECT_NE(nullptr, new_allocation.ptr);
EXPECT_TRUE(is_aligned(new_allocation.ptr));
bool const free_front{free_distribution(generator) == free_distribution.max()};
if (free_front) {
auto front = allocations.front();
EXPECT_NO_THROW(this->mr->deallocate(front.ptr, front.size));
allocations.pop_front();
}
}
// free any remaining allocations
for (auto alloc : allocations) {
EXPECT_NO_THROW(this->mr->deallocate(alloc.ptr, alloc.size));
allocations.pop_front();
}
}
static constexpr std::size_t MinTestedAlignment{16};
static constexpr std::size_t MaxTestedAlignment{4096};
static constexpr std::size_t TestedAlignmentMultiplier{2};
static constexpr std::size_t NUM_TRIALS{100};
TYPED_TEST(MRTest, AlignmentTest)
{
std::default_random_engine generator(0);
constexpr std::size_t MAX_ALLOCATION_SIZE{10 * size_mb};
std::uniform_int_distribution<std::size_t> size_distribution(1, MAX_ALLOCATION_SIZE);
for (std::size_t num_trials = 0; num_trials < NUM_TRIALS; ++num_trials) {
for (std::size_t alignment = MinTestedAlignment; alignment <= MaxTestedAlignment;
alignment *= TestedAlignmentMultiplier) {
auto allocation_size = size_distribution(generator);
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->mr->allocate(allocation_size, alignment));
EXPECT_TRUE(is_aligned(ptr, alignment));
EXPECT_NO_THROW(this->mr->deallocate(ptr, allocation_size, alignment));
}
}
}
TYPED_TEST(MRTest, UnsupportedAlignmentTest)
{
std::default_random_engine generator(0);
constexpr std::size_t MAX_ALLOCATION_SIZE{10 * size_mb};
std::uniform_int_distribution<std::size_t> size_distribution(1, MAX_ALLOCATION_SIZE);
for (std::size_t num_trials = 0; num_trials < NUM_TRIALS; ++num_trials) {
for (std::size_t alignment = MinTestedAlignment; alignment <= MaxTestedAlignment;
alignment *= TestedAlignmentMultiplier) {
auto allocation_size = size_distribution(generator);
void* ptr{nullptr};
// An unsupported alignment (like an odd number) should result in an
// alignment of `alignof(std::max_align_t)`
auto const bad_alignment = alignment + 1;
EXPECT_NO_THROW(ptr = this->mr->allocate(allocation_size, bad_alignment));
EXPECT_TRUE(is_aligned(ptr, alignof(std::max_align_t)));
EXPECT_NO_THROW(this->mr->deallocate(ptr, allocation_size, bad_alignment));
}
}
}
TEST(PinnedResource, isPinned)
{
rmm::mr::pinned_memory_resource mr;
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = mr.allocate(100));
EXPECT_TRUE(is_pinned_memory(ptr));
EXPECT_NO_THROW(mr.deallocate(ptr, 100));
}
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/limiting_mr_tests.cpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../byte_literals.hpp"
#include <rmm/detail/error.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/limiting_resource_adaptor.hpp>
#include <gtest/gtest.h>
namespace rmm::test {
namespace {
using Limiting_adaptor = rmm::mr::limiting_resource_adaptor<rmm::mr::device_memory_resource>;
TEST(LimitingTest, ThrowOnNullUpstream)
{
auto const max_size{5_MiB};
auto construct_nullptr = []() { Limiting_adaptor mr{nullptr, max_size}; };
EXPECT_THROW(construct_nullptr(), rmm::logic_error);
}
TEST(LimitingTest, TooBig)
{
auto const max_size{5_MiB};
Limiting_adaptor mr{rmm::mr::get_current_device_resource(), max_size};
EXPECT_THROW(mr.allocate(max_size + 1), rmm::out_of_memory);
}
TEST(LimitingTest, UpstreamFailure)
{
auto const max_size_1{2_MiB};
auto const max_size_2{5_MiB};
Limiting_adaptor mr1{rmm::mr::get_current_device_resource(), max_size_1};
Limiting_adaptor mr2{&mr1, max_size_2};
EXPECT_THROW(mr2.allocate(4_MiB), rmm::out_of_memory);
}
TEST(LimitingTest, UnderLimitDueToFrees)
{
auto const max_size{10_MiB};
Limiting_adaptor mr{rmm::mr::get_current_device_resource(), max_size};
auto const size1{4_MiB};
auto* ptr1 = mr.allocate(size1);
auto allocated_bytes = size1;
EXPECT_EQ(mr.get_allocated_bytes(), allocated_bytes);
EXPECT_EQ(mr.get_allocation_limit() - mr.get_allocated_bytes(), max_size - allocated_bytes);
auto* ptr2 = mr.allocate(size1);
allocated_bytes += size1;
EXPECT_EQ(mr.get_allocated_bytes(), allocated_bytes);
EXPECT_EQ(mr.get_allocation_limit() - mr.get_allocated_bytes(), max_size - allocated_bytes);
mr.deallocate(ptr1, size1);
allocated_bytes -= size1;
EXPECT_EQ(mr.get_allocated_bytes(), allocated_bytes);
EXPECT_EQ(mr.get_allocation_limit() - mr.get_allocated_bytes(), max_size - allocated_bytes);
// note that we don't keep track of fragmentation or things like page size
// so this should fill 100% of the memory even though it is probably over.
auto const size2{6_MiB};
auto* ptr3 = mr.allocate(size2);
allocated_bytes += size2;
EXPECT_EQ(mr.get_allocated_bytes(), allocated_bytes);
EXPECT_EQ(mr.get_allocation_limit() - mr.get_allocated_bytes(), 0);
mr.deallocate(ptr2, size1);
mr.deallocate(ptr3, size2);
}
TEST(LimitingTest, OverLimit)
{
auto const max_size{10_MiB};
Limiting_adaptor mr{rmm::mr::get_current_device_resource(), max_size};
auto const size1{4_MiB};
auto* ptr1 = mr.allocate(size1);
auto allocated_bytes = size1;
EXPECT_EQ(mr.get_allocated_bytes(), allocated_bytes);
EXPECT_EQ(mr.get_allocation_limit() - mr.get_allocated_bytes(), max_size - allocated_bytes);
auto* ptr2 = mr.allocate(size1);
allocated_bytes += size1;
EXPECT_EQ(mr.get_allocated_bytes(), allocated_bytes);
EXPECT_EQ(mr.get_allocation_limit() - mr.get_allocated_bytes(), max_size - allocated_bytes);
auto const size2{3_MiB};
EXPECT_THROW(mr.allocate(size2), rmm::out_of_memory);
EXPECT_EQ(mr.get_allocated_bytes(), allocated_bytes);
EXPECT_EQ(mr.get_allocation_limit() - mr.get_allocated_bytes(), max_size - allocated_bytes);
mr.deallocate(ptr1, 4_MiB);
mr.deallocate(ptr2, 4_MiB);
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/cuda_async_mr_tests.cpp
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/cuda_device.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/cuda_async_memory_resource.hpp>
#include <gtest/gtest.h>
namespace rmm::test {
namespace {
using cuda_async_mr = rmm::mr::cuda_async_memory_resource;
static_assert(cuda::mr::resource_with<cuda_async_mr, cuda::mr::device_accessible>);
static_assert(cuda::mr::async_resource_with<cuda_async_mr, cuda::mr::device_accessible>);
class AsyncMRTest : public ::testing::Test {
protected:
void SetUp() override
{
if (!rmm::detail::async_alloc::is_supported()) {
GTEST_SKIP() << "Skipping tests since cudaMallocAsync not supported with this CUDA "
<< "driver/runtime version";
}
}
};
TEST_F(AsyncMRTest, ThrowIfNotSupported)
{
auto construct_mr = []() { cuda_async_mr mr; };
#ifndef RMM_CUDA_MALLOC_ASYNC_SUPPORT
EXPECT_THROW(construct_mr(), rmm::logic_error);
#else
EXPECT_NO_THROW(construct_mr());
#endif
}
#if defined(RMM_CUDA_MALLOC_ASYNC_SUPPORT)
TEST_F(AsyncMRTest, ExplicitInitialPoolSize)
{
const auto pool_init_size{100};
cuda_async_mr mr{pool_init_size};
void* ptr = mr.allocate(pool_init_size);
mr.deallocate(ptr, pool_init_size);
RMM_CUDA_TRY(cudaDeviceSynchronize());
}
TEST_F(AsyncMRTest, ExplicitReleaseThreshold)
{
const auto pool_init_size{100};
const auto pool_release_threshold{1000};
cuda_async_mr mr{pool_init_size, pool_release_threshold};
void* ptr = mr.allocate(pool_init_size);
mr.deallocate(ptr, pool_init_size);
RMM_CUDA_TRY(cudaDeviceSynchronize());
}
TEST_F(AsyncMRTest, DifferentPoolsUnequal)
{
const auto pool_init_size{100};
const auto pool_release_threshold{1000};
cuda_async_mr mr1{pool_init_size, pool_release_threshold};
cuda_async_mr mr2{pool_init_size, pool_release_threshold};
EXPECT_FALSE(mr1.is_equal(mr2));
}
#endif
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/mr_test.hpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../../byte_literals.hpp"
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/aligned.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/binning_memory_resource.hpp>
#include <rmm/mr/device/cuda_async_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/fixed_size_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <gtest/gtest.h>
#include <cuda_runtime_api.h>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <random>
#include <utility>
namespace rmm::test {
/**
* @brief Returns if a pointer points to a device memory or managed memory
* allocation.
*/
inline bool is_device_memory(void* ptr)
{
cudaPointerAttributes attributes{};
if (cudaSuccess != cudaPointerGetAttributes(&attributes, ptr)) { return false; }
return (attributes.type == cudaMemoryTypeDevice) or (attributes.type == cudaMemoryTypeManaged);
}
enum size_in_bytes : size_t {};
constexpr auto default_num_allocations{100};
constexpr size_in_bytes default_max_size{5_MiB};
struct allocation {
void* ptr{nullptr};
std::size_t size{0};
allocation(void* ptr, std::size_t size) : ptr{ptr}, size{size} {}
allocation() = default;
};
// Various test functions, shared between single-threaded and multithreaded tests.
inline void test_get_current_device_resource()
{
EXPECT_NE(nullptr, rmm::mr::get_current_device_resource());
void* ptr = rmm::mr::get_current_device_resource()->allocate(1_MiB);
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(ptr));
EXPECT_TRUE(is_device_memory(ptr));
rmm::mr::get_current_device_resource()->deallocate(ptr, 1_MiB);
}
inline void test_allocate(rmm::mr::device_memory_resource* mr,
std::size_t bytes,
cuda_stream_view stream = {})
{
void* ptr = mr->allocate(bytes);
if (not stream.is_default()) { stream.synchronize(); }
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(ptr));
EXPECT_TRUE(is_device_memory(ptr));
mr->deallocate(ptr, bytes);
if (not stream.is_default()) { stream.synchronize(); }
}
// Simple reproducer for https://github.com/rapidsai/rmm/issues/861
inline void concurrent_allocations_are_different(rmm::mr::device_memory_resource* mr,
cuda_stream_view stream)
{
const auto size{8_B};
void* ptr1 = mr->allocate(size, stream);
void* ptr2 = mr->allocate(size, stream);
EXPECT_NE(ptr1, ptr2);
mr->deallocate(ptr1, size, stream);
mr->deallocate(ptr2, size, stream);
}
inline void test_various_allocations(rmm::mr::device_memory_resource* mr, cuda_stream_view stream)
{
// test allocating zero bytes on non-default stream
{
void* ptr = mr->allocate(0, stream);
stream.synchronize();
EXPECT_NO_THROW(mr->deallocate(ptr, 0, stream));
stream.synchronize();
}
test_allocate(mr, 4_B, stream);
test_allocate(mr, 1_KiB, stream);
test_allocate(mr, 1_MiB, stream);
test_allocate(mr, 1_GiB, stream);
// should fail to allocate too much
{
void* ptr{nullptr};
EXPECT_THROW(ptr = mr->allocate(1_PiB, stream), rmm::out_of_memory);
EXPECT_EQ(nullptr, ptr);
// test e.what();
try {
ptr = mr->allocate(1_PiB, stream);
} catch (rmm::out_of_memory const& e) {
EXPECT_NE(std::string{e.what()}.find("out_of_memory"), std::string::npos);
}
}
}
inline void test_random_allocations(rmm::mr::device_memory_resource* mr,
std::size_t num_allocations = default_num_allocations,
size_in_bytes max_size = default_max_size,
cuda_stream_view stream = {})
{
std::vector<allocation> allocations(num_allocations);
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> distribution(1, max_size);
// num_allocations allocations from [0,max_size)
std::for_each(allocations.begin(),
allocations.end(),
[&generator, &distribution, stream, mr](allocation& alloc) {
alloc.size = distribution(generator);
EXPECT_NO_THROW(alloc.ptr = mr->allocate(alloc.size, stream));
if (not stream.is_default()) { stream.synchronize(); }
EXPECT_NE(nullptr, alloc.ptr);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(alloc.ptr));
});
std::for_each(allocations.begin(), allocations.end(), [stream, mr](allocation& alloc) {
EXPECT_NO_THROW(mr->deallocate(alloc.ptr, alloc.size, stream));
if (not stream.is_default()) { stream.synchronize(); }
});
}
inline void test_mixed_random_allocation_free(rmm::mr::device_memory_resource* mr,
size_in_bytes max_size = default_max_size,
cuda_stream_view stream = {})
{
std::default_random_engine generator;
constexpr std::size_t num_allocations{100};
std::uniform_int_distribution<std::size_t> size_distribution(1, max_size);
constexpr int allocation_probability{53}; // percent
constexpr int max_probability{99};
std::uniform_int_distribution<int> op_distribution(0, max_probability);
std::uniform_int_distribution<int> index_distribution(0, num_allocations - 1);
std::size_t active_allocations{0};
std::size_t allocation_count{0};
std::vector<allocation> allocations;
for (std::size_t i = 0; i < num_allocations * 2; ++i) {
bool do_alloc = true;
if (active_allocations > 0) {
int chance = op_distribution(generator);
do_alloc = (chance < allocation_probability) && (allocation_count < num_allocations);
}
if (do_alloc) {
std::size_t size = size_distribution(generator);
active_allocations++;
allocation_count++;
EXPECT_NO_THROW(allocations.emplace_back(mr->allocate(size, stream), size));
auto new_allocation = allocations.back();
EXPECT_NE(nullptr, new_allocation.ptr);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(new_allocation.ptr));
} else {
auto const index = static_cast<int>(index_distribution(generator) % active_allocations);
active_allocations--;
allocation to_free = allocations[index];
allocations.erase(std::next(allocations.begin(), index));
EXPECT_NO_THROW(mr->deallocate(to_free.ptr, to_free.size, stream));
}
}
EXPECT_EQ(active_allocations, 0);
EXPECT_EQ(allocations.size(), active_allocations);
}
using MRFactoryFunc = std::function<std::shared_ptr<rmm::mr::device_memory_resource>()>;
/// Encapsulates a `device_memory_resource` factory function and associated name
struct mr_factory {
mr_factory(std::string name, MRFactoryFunc factory)
: name{std::move(name)}, factory{std::move(factory)}
{
}
std::string name; ///< Name to associate with tests that use this factory
MRFactoryFunc factory; ///< Factory function that returns shared_ptr to `device_memory_resource`
///< instance to use in test
};
/// Test fixture class value-parameterized on different `mr_factory`s
struct mr_test : public ::testing::TestWithParam<mr_factory> {
void SetUp() override
{
auto factory = GetParam().factory;
mr = factory();
if (mr == nullptr) {
GTEST_SKIP() << "Skipping tests since the memory resource is not supported with this CUDA "
<< "driver/runtime version";
}
}
std::shared_ptr<rmm::mr::device_memory_resource> mr; ///< Pointer to resource to use in tests
rmm::cuda_stream stream{};
};
struct mr_allocation_test : public mr_test {};
/// MR factory functions
inline auto make_cuda() { return std::make_shared<rmm::mr::cuda_memory_resource>(); }
inline auto make_cuda_async()
{
if (rmm::detail::async_alloc::is_supported()) {
return std::make_shared<rmm::mr::cuda_async_memory_resource>();
}
return std::shared_ptr<rmm::mr::cuda_async_memory_resource>{nullptr};
}
inline auto make_managed() { return std::make_shared<rmm::mr::managed_memory_resource>(); }
inline auto make_pool()
{
return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda());
}
inline auto make_arena()
{
return rmm::mr::make_owning_wrapper<rmm::mr::arena_memory_resource>(make_cuda());
}
inline auto make_fixed_size()
{
return rmm::mr::make_owning_wrapper<rmm::mr::fixed_size_memory_resource>(make_cuda());
}
inline auto make_binning()
{
auto pool = make_pool();
// Add a binning_memory_resource with fixed-size bins of sizes 256, 512, 1024, 2048 and 4096KiB
// Larger allocations will use the pool resource
auto const bin_range_start{18};
auto const bin_range_end{22};
auto mr = rmm::mr::make_owning_wrapper<rmm::mr::binning_memory_resource>(
pool, bin_range_start, bin_range_end);
return mr;
}
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/mr_multithreaded_tests.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mr_test.hpp"
#include <gtest/gtest.h>
#include <rmm/cuda_stream.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <thread>
#include <vector>
namespace rmm::test {
namespace {
struct mr_test_mt : public mr_test {};
INSTANTIATE_TEST_CASE_P(MultiThreadResourceTests,
mr_test_mt,
::testing::Values(mr_factory{"CUDA", &make_cuda},
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
mr_factory{"CUDA_Async", &make_cuda_async},
#endif
mr_factory{"Managed", &make_managed},
mr_factory{"Pool", &make_pool},
mr_factory{"Arena", &make_arena},
mr_factory{"Binning", &make_binning}),
[](auto const& info) { return info.param.name; });
template <typename Task, typename... Arguments>
void spawn_n(std::size_t num_threads, Task task, Arguments&&... args)
{
std::vector<std::thread> threads;
threads.reserve(num_threads);
for (std::size_t i = 0; i < num_threads; ++i) {
threads.emplace_back(std::thread(task, std::forward<Arguments>(args)...));
}
for (auto& thread : threads) {
thread.join();
}
}
template <typename Task, typename... Arguments>
void spawn(Task task, Arguments&&... args)
{
spawn_n(4, task, std::forward<Arguments>(args)...);
}
TEST(DefaultTest, UseCurrentDeviceResource_mt) { spawn(test_get_current_device_resource); }
TEST(DefaultTest, CurrentDeviceResourceIsCUDA_mt)
{
spawn([]() {
EXPECT_NE(nullptr, rmm::mr::get_current_device_resource());
EXPECT_TRUE(rmm::mr::get_current_device_resource()->is_equal(rmm::mr::cuda_memory_resource{}));
});
}
TEST(DefaultTest, GetCurrentDeviceResource_mt)
{
spawn([]() {
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource();
EXPECT_NE(nullptr, mr);
EXPECT_TRUE(mr->is_equal(rmm::mr::cuda_memory_resource{}));
});
}
TEST_P(mr_test_mt, SetCurrentDeviceResource_mt)
{
// single thread changes default resource, then multiple threads use it
rmm::mr::device_memory_resource* old = rmm::mr::set_current_device_resource(this->mr.get());
EXPECT_NE(nullptr, old);
spawn([mr = this->mr.get()]() {
EXPECT_EQ(mr, rmm::mr::get_current_device_resource());
test_get_current_device_resource(); // test allocating with the new default resource
});
// setting default resource w/ nullptr should reset to initial
rmm::mr::set_current_device_resource(nullptr);
EXPECT_TRUE(old->is_equal(*rmm::mr::get_current_device_resource()));
}
TEST_P(mr_test_mt, SetCurrentDeviceResourcePerThread_mt)
{
int num_devices{};
RMM_CUDA_TRY(cudaGetDeviceCount(&num_devices));
std::vector<std::thread> threads;
threads.reserve(num_devices);
for (int i = 0; i < num_devices; ++i) {
threads.emplace_back(std::thread{[mr = this->mr.get()](auto dev_id) {
RMM_CUDA_TRY(cudaSetDevice(dev_id));
rmm::mr::device_memory_resource* old =
rmm::mr::set_current_device_resource(mr);
EXPECT_NE(nullptr, old);
// initial resource for this device should be CUDA mr
EXPECT_TRUE(old->is_equal(rmm::mr::cuda_memory_resource{}));
// get_current_device_resource should equal the resource we
// just set
EXPECT_EQ(mr, rmm::mr::get_current_device_resource());
// Setting current dev resource to nullptr should reset to
// cuda MR and return the MR we previously set
old = rmm::mr::set_current_device_resource(nullptr);
EXPECT_NE(nullptr, old);
EXPECT_EQ(old, mr);
EXPECT_TRUE(rmm::mr::get_current_device_resource()->is_equal(
rmm::mr::cuda_memory_resource{}));
},
i});
}
for (auto& thread : threads) {
thread.join();
}
}
TEST_P(mr_test_mt, AllocateDefaultStream)
{
spawn(test_various_allocations, this->mr.get(), rmm::cuda_stream_view{});
}
TEST_P(mr_test_mt, AllocateOnStream)
{
spawn(test_various_allocations, this->mr.get(), this->stream.view());
}
TEST_P(mr_test_mt, RandomAllocationsDefaultStream)
{
spawn(test_random_allocations,
this->mr.get(),
default_num_allocations,
default_max_size,
rmm::cuda_stream_view{});
}
TEST_P(mr_test_mt, RandomAllocationsStream)
{
spawn(test_random_allocations,
this->mr.get(),
default_num_allocations,
default_max_size,
this->stream.view());
}
TEST_P(mr_test_mt, MixedRandomAllocationFreeDefaultStream)
{
spawn(
test_mixed_random_allocation_free, this->mr.get(), default_max_size, rmm::cuda_stream_view{});
}
TEST_P(mr_test_mt, MixedRandomAllocationFreeStream)
{
spawn(test_mixed_random_allocation_free, this->mr.get(), default_max_size, this->stream.view());
}
void allocate_loop(rmm::mr::device_memory_resource* mr,
std::size_t num_allocations,
std::list<allocation>& allocations,
std::mutex& mtx,
std::condition_variable& allocations_ready,
cudaEvent_t& event,
rmm::cuda_stream_view stream)
{
constexpr std::size_t max_size{1_MiB};
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> size_distribution(1, max_size);
for (std::size_t i = 0; i < num_allocations; ++i) {
std::size_t size = size_distribution(generator);
void* ptr = mr->allocate(size, stream);
{
std::lock_guard<std::mutex> lock(mtx);
RMM_CUDA_TRY(cudaEventRecord(event, stream.value()));
allocations.emplace_back(ptr, size);
}
allocations_ready.notify_one();
}
// Work around for threads going away before cudaEvent has finished async processing
cudaEventSynchronize(event);
}
void deallocate_loop(rmm::mr::device_memory_resource* mr,
std::size_t num_allocations,
std::list<allocation>& allocations,
std::mutex& mtx,
std::condition_variable& allocations_ready,
cudaEvent_t& event,
rmm::cuda_stream_view stream)
{
for (std::size_t i = 0; i < num_allocations; i++) {
std::unique_lock lock(mtx);
allocations_ready.wait(lock, [&allocations] { return !allocations.empty(); });
RMM_CUDA_TRY(cudaStreamWaitEvent(stream.value(), event));
allocation alloc = allocations.front();
allocations.pop_front();
mr->deallocate(alloc.ptr, alloc.size, stream);
}
// Work around for threads going away before cudaEvent has finished async processing
cudaEventSynchronize(event);
}
void test_allocate_free_different_threads(rmm::mr::device_memory_resource* mr,
rmm::cuda_stream_view streamA,
rmm::cuda_stream_view streamB)
{
constexpr std::size_t num_allocations{100};
std::mutex mtx;
std::condition_variable allocations_ready;
std::list<allocation> allocations;
cudaEvent_t event;
RMM_CUDA_TRY(cudaEventCreate(&event));
std::thread producer(allocate_loop,
mr,
num_allocations,
std::ref(allocations),
std::ref(mtx),
std::ref(allocations_ready),
std::ref(event),
streamA);
std::thread consumer(deallocate_loop,
mr,
num_allocations,
std::ref(allocations),
std::ref(mtx),
std::ref(allocations_ready),
std::ref(event),
streamB);
producer.join();
consumer.join();
RMM_CUDA_TRY(cudaEventDestroy(event));
}
TEST_P(mr_test_mt, AllocFreeDifferentThreadsDefaultStream)
{
test_allocate_free_different_threads(
this->mr.get(), rmm::cuda_stream_default, rmm::cuda_stream_default);
}
TEST_P(mr_test_mt, AllocFreeDifferentThreadsPerThreadDefaultStream)
{
test_allocate_free_different_threads(
this->mr.get(), rmm::cuda_stream_per_thread, rmm::cuda_stream_per_thread);
}
TEST_P(mr_test_mt, AllocFreeDifferentThreadsSameStream)
{
test_allocate_free_different_threads(this->mr.get(), this->stream, this->stream);
}
TEST_P(mr_test_mt, AllocFreeDifferentThreadsDifferentStream)
{
rmm::cuda_stream streamB;
test_allocate_free_different_threads(this->mr.get(), this->stream, streamB);
streamB.synchronize();
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/arena_mr_tests.cpp
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../byte_literals.hpp"
#include <rmm/cuda_stream.hpp>
#include <rmm/detail/aligned.hpp>
#include <rmm/detail/cuda_util.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <memory>
#include <sys/stat.h>
#include <thread>
#include <vector>
namespace rmm::test {
namespace {
class mock_memory_resource {
public:
MOCK_METHOD(void*, allocate, (std::size_t));
MOCK_METHOD(void, deallocate, (void*, std::size_t));
};
using rmm::mr::detail::arena::block;
using rmm::mr::detail::arena::byte_span;
using rmm::mr::detail::arena::superblock;
using global_arena = rmm::mr::detail::arena::global_arena<mock_memory_resource>;
using arena = rmm::mr::detail::arena::arena<mock_memory_resource>;
using arena_mr = rmm::mr::arena_memory_resource<rmm::mr::device_memory_resource>;
using ::testing::Return;
// NOLINTBEGIN(cppcoreguidelines-pro-type-reinterpret-cast,performance-no-int-to-ptr)
auto const fake_address = reinterpret_cast<void*>(1_KiB);
auto const fake_address2 = reinterpret_cast<void*>(2_KiB);
auto const fake_address3 = reinterpret_cast<void*>(superblock::minimum_size);
auto const fake_address4 = reinterpret_cast<void*>(superblock::minimum_size * 2);
// NOLINTEND(cppcoreguidelines-pro-type-reinterpret-cast,performance-no-int-to-ptr)
struct ArenaTest : public ::testing::Test {
void SetUp() override
{
EXPECT_CALL(mock_mr, allocate(arena_size)).WillOnce(Return(fake_address3));
EXPECT_CALL(mock_mr, deallocate(fake_address3, arena_size));
global = std::make_unique<global_arena>(&mock_mr, arena_size);
per_thread = std::make_unique<arena>(*global);
}
std::size_t arena_size{superblock::minimum_size * 4};
mock_memory_resource mock_mr{};
std::unique_ptr<global_arena> global{};
std::unique_ptr<arena> per_thread{};
};
/**
* Test align_to_size_class.
*/
TEST_F(ArenaTest, AlignToSizeClass) // NOLINT
{
using rmm::mr::detail::arena::align_to_size_class;
EXPECT_EQ(align_to_size_class(8), 256);
EXPECT_EQ(align_to_size_class(256), 256);
EXPECT_EQ(align_to_size_class(264), 512);
EXPECT_EQ(align_to_size_class(512), 512);
EXPECT_EQ(align_to_size_class(17_KiB), 20_KiB);
EXPECT_EQ(align_to_size_class(13_MiB), 14_MiB);
EXPECT_EQ(align_to_size_class(2500_MiB), 2560_MiB);
EXPECT_EQ(align_to_size_class(128_GiB), 128_GiB);
EXPECT_EQ(align_to_size_class(1_PiB), std::numeric_limits<std::size_t>::max());
}
/**
* Test byte_span.
*/
TEST_F(ArenaTest, ByteSpan) // NOLINT
{
byte_span const span{};
EXPECT_FALSE(span.is_valid());
byte_span const span2{fake_address, 256};
EXPECT_TRUE(span2.is_valid());
}
/**
* Test block.
*/
TEST_F(ArenaTest, BlockFits) // NOLINT
{
block const blk{fake_address, 1_KiB};
EXPECT_TRUE(blk.fits(1_KiB));
EXPECT_FALSE(blk.fits(1_KiB + 1));
}
TEST_F(ArenaTest, BlockIsContiguousBefore) // NOLINT
{
block const blk{fake_address, 1_KiB};
block const blk2{fake_address2, 256};
EXPECT_TRUE(blk.is_contiguous_before(blk2));
block const blk3{fake_address, 512};
block const blk4{fake_address2, 1_KiB};
EXPECT_FALSE(blk3.is_contiguous_before(blk4));
}
TEST_F(ArenaTest, BlockSplit) // NOLINT
{
block const blk{fake_address, 2_KiB};
auto const [head, tail] = blk.split(1_KiB);
EXPECT_EQ(head.pointer(), fake_address);
EXPECT_EQ(head.size(), 1_KiB);
EXPECT_EQ(tail.pointer(), fake_address2);
EXPECT_EQ(tail.size(), 1_KiB);
}
TEST_F(ArenaTest, BlockMerge) // NOLINT
{
block const blk{fake_address, 1_KiB};
block const blk2{fake_address2, 1_KiB};
auto const merged = blk.merge(blk2);
EXPECT_EQ(merged.pointer(), fake_address);
EXPECT_EQ(merged.size(), 2_KiB);
}
/**
* Test superblock.
*/
TEST_F(ArenaTest, SuperblockEmpty) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
EXPECT_TRUE(sblk.empty());
sblk.first_fit(256);
EXPECT_FALSE(sblk.empty());
}
TEST_F(ArenaTest, SuperblockContains) // NOLINT
{
superblock const sblk{fake_address3, superblock::minimum_size};
block const blk{fake_address, 2_KiB};
EXPECT_FALSE(sblk.contains(blk));
block const blk2{fake_address3, 1_KiB};
EXPECT_TRUE(sblk.contains(blk2));
block const blk3{fake_address3, superblock::minimum_size + 1};
EXPECT_FALSE(sblk.contains(blk3));
block const blk4{fake_address3, superblock::minimum_size};
EXPECT_TRUE(sblk.contains(blk4));
block const blk5{fake_address4, 256};
EXPECT_FALSE(sblk.contains(blk5));
}
TEST_F(ArenaTest, SuperblockFits) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
EXPECT_TRUE(sblk.fits(superblock::minimum_size));
EXPECT_FALSE(sblk.fits(superblock::minimum_size + 1));
auto const blk = sblk.first_fit(superblock::minimum_size / 4);
sblk.first_fit(superblock::minimum_size / 4);
sblk.coalesce(blk);
EXPECT_TRUE(sblk.fits(superblock::minimum_size / 2));
EXPECT_FALSE(sblk.fits(superblock::minimum_size / 2 + 1));
}
TEST_F(ArenaTest, SuperblockIsContiguousBefore) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
superblock sb2{fake_address4, superblock::minimum_size};
EXPECT_TRUE(sblk.is_contiguous_before(sb2));
auto const blk = sblk.first_fit(256);
EXPECT_FALSE(sblk.is_contiguous_before(sb2));
sblk.coalesce(blk);
EXPECT_TRUE(sblk.is_contiguous_before(sb2));
auto const blk2 = sb2.first_fit(1_KiB);
EXPECT_FALSE(sblk.is_contiguous_before(sb2));
sb2.coalesce(blk2);
EXPECT_TRUE(sblk.is_contiguous_before(sb2));
}
TEST_F(ArenaTest, SuperblockSplit) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size * 2};
auto const [head, tail] = sblk.split(superblock::minimum_size);
EXPECT_EQ(head.pointer(), fake_address3);
EXPECT_EQ(head.size(), superblock::minimum_size);
EXPECT_TRUE(head.empty());
EXPECT_EQ(tail.pointer(), fake_address4);
EXPECT_EQ(tail.size(), superblock::minimum_size);
EXPECT_TRUE(tail.empty());
}
TEST_F(ArenaTest, SuperblockMerge) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
superblock sb2{fake_address4, superblock::minimum_size};
auto const merged = sblk.merge(sb2);
EXPECT_EQ(merged.pointer(), fake_address3);
EXPECT_EQ(merged.size(), superblock::minimum_size * 2);
EXPECT_TRUE(merged.empty());
}
TEST_F(ArenaTest, SuperblockFirstFit) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
auto const blk = sblk.first_fit(1_KiB);
EXPECT_EQ(blk.pointer(), fake_address3);
EXPECT_EQ(blk.size(), 1_KiB);
auto const blk2 = sblk.first_fit(2_KiB);
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
EXPECT_EQ(blk2.pointer(), static_cast<char*>(fake_address3) + 1_KiB);
EXPECT_EQ(blk2.size(), 2_KiB);
sblk.coalesce(blk);
auto const blk3 = sblk.first_fit(512);
EXPECT_EQ(blk3.pointer(), fake_address3);
EXPECT_EQ(blk3.size(), 512);
}
TEST_F(ArenaTest, SuperblockCoalesceAfterFull) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
auto const blk = sblk.first_fit(superblock::minimum_size / 2);
sblk.first_fit(superblock::minimum_size / 2);
sblk.coalesce(blk);
EXPECT_TRUE(sblk.first_fit(superblock::minimum_size / 2).is_valid());
}
TEST_F(ArenaTest, SuperblockCoalesceMergeNext) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
auto const blk = sblk.first_fit(superblock::minimum_size / 2);
sblk.coalesce(blk);
EXPECT_TRUE(sblk.first_fit(superblock::minimum_size).is_valid());
}
TEST_F(ArenaTest, SuperblockCoalesceMergePrevious) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
auto const blk = sblk.first_fit(1_KiB);
auto const blk2 = sblk.first_fit(1_KiB);
sblk.first_fit(1_KiB);
sblk.coalesce(blk);
sblk.coalesce(blk2);
auto const blk3 = sblk.first_fit(2_KiB);
EXPECT_EQ(blk3.pointer(), fake_address3);
}
TEST_F(ArenaTest, SuperblockCoalesceMergePreviousAndNext) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
auto const blk = sblk.first_fit(1_KiB);
auto const blk2 = sblk.first_fit(1_KiB);
sblk.coalesce(blk);
sblk.coalesce(blk2);
EXPECT_TRUE(sblk.first_fit(superblock::minimum_size).is_valid());
}
TEST_F(ArenaTest, SuperblockMaxFreeSize) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
auto const blk = sblk.first_fit(superblock::minimum_size / 4);
sblk.first_fit(superblock::minimum_size / 4);
sblk.coalesce(blk);
EXPECT_EQ(sblk.max_free_size(), superblock::minimum_size / 2);
}
TEST_F(ArenaTest, SuperblockMaxFreeSizeWhenFull) // NOLINT
{
superblock sblk{fake_address3, superblock::minimum_size};
sblk.first_fit(superblock::minimum_size);
EXPECT_EQ(sblk.max_free_size(), 0);
}
/**
* Test global_arena.
*/
TEST_F(ArenaTest, GlobalArenaNullUpstream) // NOLINT
{
auto construct_nullptr = []() { global_arena global{nullptr, std::nullopt}; };
EXPECT_THROW(construct_nullptr(), rmm::logic_error); // NOLINT(cppcoreguidelines-avoid-goto)
}
TEST_F(ArenaTest, GlobalArenaAcquire) // NOLINT
{
auto const sblk = global->acquire(256);
EXPECT_EQ(sblk.pointer(), fake_address3);
EXPECT_EQ(sblk.size(), superblock::minimum_size);
EXPECT_TRUE(sblk.empty());
auto const sb2 = global->acquire(1_KiB);
EXPECT_EQ(sb2.pointer(), fake_address4);
EXPECT_EQ(sb2.size(), superblock::minimum_size);
EXPECT_TRUE(sb2.empty());
global->acquire(512);
global->acquire(512);
EXPECT_FALSE(global->acquire(512).is_valid());
}
TEST_F(ArenaTest, GlobalArenaReleaseMergeNext) // NOLINT
{
auto sblk = global->acquire(256);
global->release(std::move(sblk));
auto* ptr = global->allocate(arena_size);
EXPECT_EQ(ptr, fake_address3);
}
TEST_F(ArenaTest, GlobalArenaReleaseMergePrevious) // NOLINT
{
auto sblk = global->acquire(256);
auto sb2 = global->acquire(1_KiB);
global->acquire(512);
global->release(std::move(sblk));
global->release(std::move(sb2));
auto* ptr = global->allocate(superblock::minimum_size * 2);
EXPECT_EQ(ptr, fake_address3);
}
TEST_F(ArenaTest, GlobalArenaReleaseMergePreviousAndNext) // NOLINT
{
auto sblk = global->acquire(256);
auto sb2 = global->acquire(1_KiB);
auto sb3 = global->acquire(512);
global->release(std::move(sblk));
global->release(std::move(sb3));
global->release(std::move(sb2));
auto* ptr = global->allocate(arena_size);
EXPECT_EQ(ptr, fake_address3);
}
TEST_F(ArenaTest, GlobalArenaReleaseMultiple) // NOLINT
{
std::set<superblock> superblocks{};
auto sblk = global->acquire(256);
superblocks.insert(std::move(sblk));
auto sb2 = global->acquire(1_KiB);
superblocks.insert(std::move(sb2));
auto sb3 = global->acquire(512);
superblocks.insert(std::move(sb3));
global->release(superblocks);
auto* ptr = global->allocate(arena_size);
EXPECT_EQ(ptr, fake_address3);
}
TEST_F(ArenaTest, GlobalArenaAllocate) // NOLINT
{
auto* ptr = global->allocate(superblock::minimum_size * 2);
EXPECT_EQ(ptr, fake_address3);
}
TEST_F(ArenaTest, GlobalArenaAllocateExtraLarge) // NOLINT
{
EXPECT_EQ(global->allocate(1_PiB), nullptr);
EXPECT_EQ(global->allocate(1_PiB), nullptr);
}
TEST_F(ArenaTest, GlobalArenaDeallocate) // NOLINT
{
auto* ptr = global->allocate(superblock::minimum_size * 2);
EXPECT_EQ(ptr, fake_address3);
global->deallocate(ptr, superblock::minimum_size * 2, {});
ptr = global->allocate(superblock::minimum_size * 2);
EXPECT_EQ(ptr, fake_address3);
}
TEST_F(ArenaTest, GlobalArenaDeallocateAlignUp) // NOLINT
{
auto* ptr = global->allocate(superblock::minimum_size + 256);
auto* ptr2 = global->allocate(superblock::minimum_size + 512);
global->deallocate(ptr, superblock::minimum_size + 256, {});
global->deallocate(ptr2, superblock::minimum_size + 512, {});
EXPECT_EQ(global->allocate(arena_size), fake_address3);
}
TEST_F(ArenaTest, GlobalArenaDeallocateFromOtherArena) // NOLINT
{
auto sblk = global->acquire(512);
auto const blk = sblk.first_fit(512);
auto const blk2 = sblk.first_fit(1024);
global->release(std::move(sblk));
global->deallocate(blk.pointer(), blk.size());
global->deallocate(blk2.pointer(), blk2.size());
EXPECT_EQ(global->allocate(arena_size), fake_address3);
}
/**
* Test arena.
*/
TEST_F(ArenaTest, ArenaAllocate) // NOLINT
{
EXPECT_EQ(per_thread->allocate(superblock::minimum_size), fake_address3);
EXPECT_EQ(per_thread->allocate(256), fake_address4);
}
TEST_F(ArenaTest, ArenaDeallocate) // NOLINT
{
auto* ptr = per_thread->allocate(superblock::minimum_size);
per_thread->deallocate(ptr, superblock::minimum_size, {});
auto* ptr2 = per_thread->allocate(256);
per_thread->deallocate(ptr2, 256, {});
EXPECT_EQ(per_thread->allocate(superblock::minimum_size), fake_address3);
}
TEST_F(ArenaTest, ArenaDeallocateMergePrevious) // NOLINT
{
auto* ptr = per_thread->allocate(256);
auto* ptr2 = per_thread->allocate(256);
per_thread->allocate(256);
per_thread->deallocate(ptr, 256, {});
per_thread->deallocate(ptr2, 256, {});
EXPECT_EQ(per_thread->allocate(512), fake_address3);
}
TEST_F(ArenaTest, ArenaDeallocateMergeNext) // NOLINT
{
auto* ptr = per_thread->allocate(256);
auto* ptr2 = per_thread->allocate(256);
per_thread->allocate(256);
per_thread->deallocate(ptr2, 256, {});
per_thread->deallocate(ptr, 256, {});
EXPECT_EQ(per_thread->allocate(512), fake_address3);
}
TEST_F(ArenaTest, ArenaDeallocateMergePreviousAndNext) // NOLINT
{
auto* ptr = per_thread->allocate(256);
auto* ptr2 = per_thread->allocate(256);
per_thread->deallocate(ptr, 256, {});
per_thread->deallocate(ptr2, 256, {});
EXPECT_EQ(per_thread->allocate(2_KiB), fake_address3);
}
TEST_F(ArenaTest, ArenaDefragment) // NOLINT
{
std::vector<void*> pointers;
std::size_t num_pointers{4};
for (std::size_t i = 0; i < num_pointers; i++) {
pointers.push_back(per_thread->allocate(superblock::minimum_size));
}
for (auto* ptr : pointers) {
per_thread->deallocate(ptr, superblock::minimum_size, {});
}
EXPECT_EQ(global->allocate(arena_size), nullptr);
per_thread->defragment();
EXPECT_EQ(global->allocate(arena_size), fake_address3);
}
/**
* Test arena_memory_resource.
*/
TEST_F(ArenaTest, ThrowOnNullUpstream) // NOLINT
{
auto construct_nullptr = []() { arena_mr mr{nullptr}; };
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto)
EXPECT_THROW(construct_nullptr(), rmm::logic_error);
}
TEST_F(ArenaTest, SizeSmallerThanSuperblockSize) // NOLINT
{
auto construct_small = []() { arena_mr mr{rmm::mr::get_current_device_resource(), 256}; };
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto)
EXPECT_THROW(construct_small(), rmm::logic_error);
}
TEST_F(ArenaTest, AllocateNinetyPercent) // NOLINT
{
EXPECT_NO_THROW([]() { // NOLINT(cppcoreguidelines-avoid-goto)
auto const free = rmm::detail::available_device_memory().first;
auto const ninety_percent =
rmm::detail::align_up(static_cast<std::size_t>(static_cast<double>(free) * 0.9),
rmm::detail::CUDA_ALLOCATION_ALIGNMENT);
arena_mr mr(rmm::mr::get_current_device_resource(), ninety_percent);
}());
}
TEST_F(ArenaTest, SmallMediumLarge) // NOLINT
{
EXPECT_NO_THROW([]() { // NOLINT(cppcoreguidelines-avoid-goto)
arena_mr mr(rmm::mr::get_current_device_resource());
auto* small = mr.allocate(256);
auto* medium = mr.allocate(64_MiB);
auto const free = rmm::detail::available_device_memory().first;
auto* large = mr.allocate(free / 3);
mr.deallocate(small, 256);
mr.deallocate(medium, 64_MiB);
mr.deallocate(large, free / 3);
}());
}
TEST_F(ArenaTest, Defragment) // NOLINT
{
EXPECT_NO_THROW([]() { // NOLINT(cppcoreguidelines-avoid-goto)
auto const arena_size = superblock::minimum_size * 4;
arena_mr mr(rmm::mr::get_current_device_resource(), arena_size);
std::vector<std::thread> threads;
std::size_t num_threads{4};
threads.reserve(num_threads);
for (std::size_t i = 0; i < num_threads; ++i) {
threads.emplace_back(std::thread([&] {
cuda_stream stream{};
void* ptr = mr.allocate(32_KiB, stream);
mr.deallocate(ptr, 32_KiB, stream);
}));
}
for (auto& thread : threads) {
thread.join();
}
auto* ptr = mr.allocate(arena_size);
mr.deallocate(ptr, arena_size);
}());
}
TEST_F(ArenaTest, DumpLogOnFailure) // NOLINT
{
arena_mr mr{rmm::mr::get_current_device_resource(), 1_MiB, true};
{ // make the log interesting
std::vector<std::thread> threads;
std::size_t num_threads{4};
threads.reserve(num_threads);
for (std::size_t i = 0; i < num_threads; ++i) {
threads.emplace_back(std::thread([&] {
void* ptr = mr.allocate(32_KiB);
mr.deallocate(ptr, 32_KiB);
}));
}
for (auto& thread : threads) {
thread.join();
}
}
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto)
EXPECT_THROW(mr.allocate(8_MiB), rmm::out_of_memory);
struct stat file_status {};
EXPECT_EQ(stat("rmm_arena_memory_dump.log", &file_status), 0);
EXPECT_GE(file_status.st_size, 0);
}
TEST_F(ArenaTest, FeatureSupport) // NOLINT
{
arena_mr mr{rmm::mr::get_current_device_resource(), 1_MiB};
EXPECT_TRUE(mr.supports_streams());
EXPECT_FALSE(mr.supports_get_mem_info());
auto [free, total] = mr.get_mem_info(rmm::cuda_stream_default);
EXPECT_EQ(free, 0);
EXPECT_EQ(total, 0);
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/failure_callback_mr_tests.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../byte_literals.hpp"
#include "rmm/cuda_stream_view.hpp"
#include "rmm/mr/device/device_memory_resource.hpp"
#include <cstddef>
#include <rmm/detail/error.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/failure_callback_resource_adaptor.hpp>
#include <gtest/gtest.h>
namespace rmm::test {
namespace {
template <typename ExceptionType = rmm::bad_alloc>
using failure_callback_adaptor =
rmm::mr::failure_callback_resource_adaptor<rmm::mr::device_memory_resource, ExceptionType>;
bool failure_handler(std::size_t /*bytes*/, void* arg)
{
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
bool& retried = *reinterpret_cast<bool*>(arg);
if (!retried) {
retried = true;
return true; // First time we request an allocation retry
}
return false; // Second time we let the adaptor throw std::bad_alloc
}
TEST(FailureCallbackTest, RetryAllocationOnce)
{
bool retried{false};
failure_callback_adaptor<> mr{rmm::mr::get_current_device_resource(), failure_handler, &retried};
EXPECT_EQ(retried, false);
EXPECT_THROW(mr.allocate(512_GiB), std::bad_alloc);
EXPECT_EQ(retried, true);
}
template <typename ExceptionType>
class always_throw_memory_resource final : public mr::device_memory_resource {
private:
void* do_allocate(std::size_t bytes, cuda_stream_view stream) override
{
throw ExceptionType{"foo"};
}
void do_deallocate(void* ptr, std::size_t bytes, cuda_stream_view stream) override{};
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
cuda_stream_view stream) const override
{
return {0, 0};
}
[[nodiscard]] bool supports_streams() const noexcept override { return false; }
[[nodiscard]] bool supports_get_mem_info() const noexcept override { return false; }
};
TEST(FailureCallbackTest, DifferentExceptionTypes)
{
always_throw_memory_resource<rmm::bad_alloc> bad_alloc_mr;
always_throw_memory_resource<rmm::out_of_memory> oom_mr;
EXPECT_THROW(bad_alloc_mr.allocate(1_MiB), rmm::bad_alloc);
EXPECT_THROW(oom_mr.allocate(1_MiB), rmm::out_of_memory);
// Wrap a bad_alloc-catching callback adaptor around an MR that always throws bad_alloc:
// Should retry once and then re-throw bad_alloc
{
bool retried{false};
failure_callback_adaptor<rmm::bad_alloc> bad_alloc_callback_mr{
&bad_alloc_mr, failure_handler, &retried};
EXPECT_EQ(retried, false);
EXPECT_THROW(bad_alloc_callback_mr.allocate(1_MiB), rmm::bad_alloc);
EXPECT_EQ(retried, true);
}
// Wrap a out_of_memory-catching callback adaptor around an MR that always throws out_of_memory:
// Should retry once and then re-throw out_of_memory
{
bool retried{false};
failure_callback_adaptor<rmm::out_of_memory> oom_callback_mr{
&oom_mr, failure_handler, &retried};
EXPECT_EQ(retried, false);
EXPECT_THROW(oom_callback_mr.allocate(1_MiB), rmm::out_of_memory);
EXPECT_EQ(retried, true);
}
// Wrap a out_of_memory-catching callback adaptor around an MR that always throws bad_alloc:
// Should not catch the bad_alloc exception
{
bool retried{false};
failure_callback_adaptor<rmm::out_of_memory> oom_callback_mr{
&bad_alloc_mr, failure_handler, &retried};
EXPECT_EQ(retried, false);
EXPECT_THROW(oom_callback_mr.allocate(1_MiB), rmm::bad_alloc); // bad_alloc passes through
EXPECT_EQ(retried, false); // Does not catch / retry on anything except OOM
}
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/stream_allocator_adaptor_tests.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/cuda_stream.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>
#include <gtest/gtest.h>
#include <memory>
namespace {
struct allocator_test : public ::testing::Test {
rmm::cuda_stream stream{};
rmm::mr::polymorphic_allocator<int> allocator{};
};
TEST_F(allocator_test, factory)
{
using Adaptor = rmm::mr::stream_allocator_adaptor<decltype(allocator)>;
auto adapted = rmm::mr::make_stream_allocator_adaptor(allocator, stream);
static_assert((std::is_same<decltype(adapted), Adaptor>::value));
EXPECT_EQ(adapted.underlying_allocator(), allocator);
EXPECT_EQ(adapted.stream(), stream);
}
TEST_F(allocator_test, self_equality)
{
auto adapted = rmm::mr::make_stream_allocator_adaptor(allocator, stream);
EXPECT_EQ(adapted, adapted);
EXPECT_FALSE(adapted != adapted);
}
TEST_F(allocator_test, equal_allocators)
{
rmm::mr::polymorphic_allocator<int> alloc0;
auto adapted0 = rmm::mr::make_stream_allocator_adaptor(alloc0, stream);
rmm::mr::polymorphic_allocator<int> alloc1;
auto adapted1 = rmm::mr::make_stream_allocator_adaptor(alloc1, stream);
EXPECT_EQ(adapted0, adapted1);
EXPECT_FALSE(adapted0 != adapted1);
}
TEST_F(allocator_test, unequal_resources)
{
rmm::mr::cuda_memory_resource mr0;
rmm::mr::polymorphic_allocator<int> alloc0{&mr0};
auto adapted0 = rmm::mr::make_stream_allocator_adaptor(alloc0, stream);
rmm::mr::managed_memory_resource mr1;
rmm::mr::polymorphic_allocator<int> alloc1{&mr1};
auto adapted1 = rmm::mr::make_stream_allocator_adaptor(alloc1, stream);
EXPECT_NE(adapted0, adapted1);
}
TEST_F(allocator_test, copy_ctor_same_type)
{
rmm::mr::polymorphic_allocator<int> alloc0;
auto adapted0 = rmm::mr::make_stream_allocator_adaptor(alloc0, stream);
using Adaptor = rmm::mr::stream_allocator_adaptor<decltype(alloc0)>;
Adaptor adapted1{adapted0};
EXPECT_EQ(adapted0, adapted1);
}
TEST_F(allocator_test, copy_ctor_different_type)
{
rmm::mr::polymorphic_allocator<int> alloc0;
auto adapted0 = rmm::mr::make_stream_allocator_adaptor(alloc0, stream);
using Adaptor = rmm::mr::stream_allocator_adaptor<rmm::mr::polymorphic_allocator<double>>;
Adaptor adapted1{adapted0};
EXPECT_EQ(adapted0, adapted1);
}
TEST_F(allocator_test, rebind)
{
auto adapted = rmm::mr::make_stream_allocator_adaptor(allocator, stream);
using Rebound = std::allocator_traits<decltype(adapted)>::rebind_alloc<double>;
static_assert((std::is_same<std::allocator_traits<Rebound>::value_type, double>::value));
static_assert(
std::is_same<Rebound,
rmm::mr::stream_allocator_adaptor<rmm::mr::polymorphic_allocator<double>>>::value);
Rebound rebound{adapted};
}
TEST_F(allocator_test, allocate_deallocate)
{
auto adapted = rmm::mr::make_stream_allocator_adaptor(allocator, stream);
auto const size{1000};
auto* ptr = adapted.allocate(size);
EXPECT_NE(ptr, nullptr);
EXPECT_NO_THROW(adapted.deallocate(ptr, size));
}
} // namespace
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/callback_mr_tests.cpp
|
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../byte_literals.hpp"
#include "../../mock_resource.hpp"
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/callback_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <cstddef>
#include <fmt/core.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace rmm::test {
namespace {
using ::testing::_;
TEST(CallbackTest, TestCallbacksAreInvoked)
{
auto base_mr = mock_resource();
EXPECT_CALL(base_mr, do_allocate(10_MiB, cuda_stream_view{})).Times(1);
EXPECT_CALL(base_mr, do_deallocate(_, 10_MiB, cuda_stream_view{})).Times(1);
auto allocate_callback = [](std::size_t size, cuda_stream_view stream, void* arg) {
auto base_mr = static_cast<rmm::mr::device_memory_resource*>(arg);
return base_mr->allocate(size, stream);
};
auto deallocate_callback = [](void* ptr, std::size_t size, cuda_stream_view stream, void* arg) {
auto base_mr = static_cast<rmm::mr::device_memory_resource*>(arg);
base_mr->deallocate(ptr, size, stream);
};
auto mr =
rmm::mr::callback_memory_resource(allocate_callback, deallocate_callback, &base_mr, &base_mr);
auto ptr = mr.allocate(10_MiB);
mr.deallocate(ptr, 10_MiB);
}
TEST(CallbackTest, LoggingTest)
{
testing::internal::CaptureStdout();
auto base_mr = rmm::mr::get_current_device_resource();
auto allocate_callback = [](std::size_t size, cuda_stream_view stream, void* arg) {
std::cout << "Allocating " << size << " bytes" << std::endl;
auto base_mr = static_cast<rmm::mr::device_memory_resource*>(arg);
return base_mr->allocate(size, stream);
};
auto deallocate_callback = [](void* ptr, std::size_t size, cuda_stream_view stream, void* arg) {
std::cout << "Deallocating " << size << " bytes" << std::endl;
auto base_mr = static_cast<rmm::mr::device_memory_resource*>(arg);
base_mr->deallocate(ptr, size, stream);
};
auto mr =
rmm::mr::callback_memory_resource(allocate_callback, deallocate_callback, base_mr, base_mr);
auto ptr = mr.allocate(10_MiB);
mr.deallocate(ptr, 10_MiB);
std::string output = testing::internal::GetCapturedStdout();
std::string expect = fmt::format("Allocating {} bytes\nDeallocating {} bytes\n", 10_MiB, 10_MiB);
ASSERT_EQ(expect, output);
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/mr_ref_tests.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mr_ref_test.hpp"
#include <rmm/mr/device/per_device_resource.hpp>
#include <cuda/memory_resource>
#include <gtest/gtest.h>
namespace rmm::test {
namespace {
INSTANTIATE_TEST_SUITE_P(ResourceTests,
mr_ref_test,
::testing::Values(mr_factory{"CUDA", &make_cuda},
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
mr_factory{"CUDA_Async", &make_cuda_async},
#endif
mr_factory{"Managed", &make_managed},
mr_factory{"Pool", &make_pool},
mr_factory{"Arena", &make_arena},
mr_factory{"Binning", &make_binning},
mr_factory{"Fixed_Size", &make_fixed_size}),
[](auto const& info) { return info.param.name; });
// Leave out fixed-size MR here because it can't handle the dynamic allocation sizes
INSTANTIATE_TEST_SUITE_P(ResourceAllocationTests,
mr_ref_allocation_test,
::testing::Values(mr_factory{"CUDA", &make_cuda},
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
mr_factory{"CUDA_Async", &make_cuda_async},
#endif
mr_factory{"Managed", &make_managed},
mr_factory{"Pool", &make_pool},
mr_factory{"Arena", &make_arena},
mr_factory{"Binning", &make_binning}),
[](auto const& info) { return info.param.name; });
TEST_P(mr_ref_test, SelfEquality) { EXPECT_TRUE(this->ref == this->ref); }
// Simple reproducer for https://github.com/rapidsai/rmm/issues/861
TEST_P(mr_ref_test, AllocationsAreDifferent) { concurrent_allocations_are_different(this->ref); }
TEST_P(mr_ref_test, AsyncAllocationsAreDifferentDefaultStream)
{
concurrent_async_allocations_are_different(this->ref, cuda_stream_view{});
}
TEST_P(mr_ref_test, AsyncAllocationsAreDifferent)
{
concurrent_async_allocations_are_different(this->ref, this->stream);
}
TEST_P(mr_ref_allocation_test, AllocateDefault) { test_various_allocations(this->ref); }
TEST_P(mr_ref_allocation_test, AllocateDefaultStream)
{
test_various_async_allocations(this->ref, cuda_stream_view{});
}
TEST_P(mr_ref_allocation_test, AllocateOnStream)
{
test_various_async_allocations(this->ref, this->stream);
}
TEST_P(mr_ref_allocation_test, RandomAllocations) { test_random_allocations(this->ref); }
TEST_P(mr_ref_allocation_test, RandomAllocationsDefaultStream)
{
test_random_async_allocations(
this->ref, default_num_allocations, default_max_size, cuda_stream_view{});
}
TEST_P(mr_ref_allocation_test, RandomAllocationsStream)
{
test_random_async_allocations(this->ref, default_num_allocations, default_max_size, this->stream);
}
TEST_P(mr_ref_allocation_test, MixedRandomAllocationFree)
{
test_mixed_random_allocation_free(this->ref, default_max_size);
}
TEST_P(mr_ref_allocation_test, MixedRandomAllocationFreeDefaultStream)
{
test_mixed_random_async_allocation_free(this->ref, default_max_size, cuda_stream_view{});
}
TEST_P(mr_ref_allocation_test, MixedRandomAllocationFreeStream)
{
test_mixed_random_async_allocation_free(this->ref, default_max_size, this->stream);
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/binning_mr_tests.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/mr/device/binning_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <gtest/gtest.h>
// explicit instantiation for test coverage purposes
template class rmm::mr::binning_memory_resource<rmm::mr::cuda_memory_resource>;
namespace rmm::test {
using cuda_mr = rmm::mr::cuda_memory_resource;
using binning_mr = rmm::mr::binning_memory_resource<cuda_mr>;
TEST(BinningTest, ThrowOnNullUpstream)
{
auto construct_nullptr = []() { binning_mr mr{nullptr}; };
EXPECT_THROW(construct_nullptr(), rmm::logic_error);
}
TEST(BinningTest, ExplicitBinMR)
{
cuda_mr cuda{};
binning_mr mr{&cuda};
mr.add_bin(1024, &cuda);
auto* ptr = mr.allocate(512);
EXPECT_NE(ptr, nullptr);
mr.deallocate(ptr, 512);
}
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/adaptor_tests.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../byte_literals.hpp"
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/aligned_resource_adaptor.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/failure_callback_resource_adaptor.hpp>
#include <rmm/mr/device/limiting_resource_adaptor.hpp>
#include <rmm/mr/device/logging_resource_adaptor.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/statistics_resource_adaptor.hpp>
#include <rmm/mr/device/thread_safe_resource_adaptor.hpp>
#include <rmm/mr/device/tracking_resource_adaptor.hpp>
#include <cuda/memory_resource>
#include <gtest/gtest.h>
#include <cstddef>
#include <type_traits>
using cuda_mr = rmm::mr::cuda_memory_resource;
using rmm::mr::aligned_resource_adaptor;
using rmm::mr::failure_callback_resource_adaptor;
using rmm::mr::limiting_resource_adaptor;
using rmm::mr::logging_resource_adaptor;
using rmm::mr::statistics_resource_adaptor;
using rmm::mr::thread_safe_resource_adaptor;
using rmm::mr::tracking_resource_adaptor;
using owning_wrapper = rmm::mr::owning_wrapper<aligned_resource_adaptor<cuda_mr>, cuda_mr>;
// explicit instantiations for test coverage purposes
template class rmm::mr::aligned_resource_adaptor<cuda_mr>;
template class rmm::mr::failure_callback_resource_adaptor<cuda_mr>;
template class rmm::mr::limiting_resource_adaptor<cuda_mr>;
template class rmm::mr::logging_resource_adaptor<cuda_mr>;
template class rmm::mr::statistics_resource_adaptor<cuda_mr>;
template class rmm::mr::thread_safe_resource_adaptor<cuda_mr>;
template class rmm::mr::tracking_resource_adaptor<cuda_mr>;
namespace rmm::test {
using adaptors = ::testing::Types<aligned_resource_adaptor<cuda_mr>,
failure_callback_resource_adaptor<cuda_mr>,
limiting_resource_adaptor<cuda_mr>,
logging_resource_adaptor<cuda_mr>,
owning_wrapper,
statistics_resource_adaptor<cuda_mr>,
thread_safe_resource_adaptor<cuda_mr>,
tracking_resource_adaptor<cuda_mr>>;
static_assert(
cuda::mr::resource_with<rmm::mr::aligned_resource_adaptor<cuda_mr>, cuda::mr::device_accessible>);
static_assert(cuda::mr::resource_with<rmm::mr::failure_callback_resource_adaptor<cuda_mr>,
cuda::mr::device_accessible>);
static_assert(cuda::mr::resource_with<rmm::mr::limiting_resource_adaptor<cuda_mr>,
cuda::mr::device_accessible>);
static_assert(
cuda::mr::resource_with<rmm::mr::logging_resource_adaptor<cuda_mr>, cuda::mr::device_accessible>);
static_assert(
cuda::mr::resource_with<rmm::mr::owning_wrapper<cuda_mr>, cuda::mr::device_accessible>);
static_assert(cuda::mr::resource_with<rmm::mr::statistics_resource_adaptor<cuda_mr>,
cuda::mr::device_accessible>);
static_assert(cuda::mr::resource_with<rmm::mr::thread_safe_resource_adaptor<cuda_mr>,
cuda::mr::device_accessible>);
static_assert(cuda::mr::resource_with<rmm::mr::tracking_resource_adaptor<cuda_mr>,
cuda::mr::device_accessible>);
template <typename MemoryResourceType>
struct AdaptorTest : public ::testing::Test {
using adaptor_type = MemoryResourceType;
cuda_mr cuda{};
std::shared_ptr<adaptor_type> mr;
AdaptorTest() : mr{make_adaptor(&cuda)} {}
auto make_adaptor(cuda_mr* upstream)
{
if constexpr (std::is_same_v<adaptor_type, failure_callback_resource_adaptor<cuda_mr>>) {
return std::make_shared<adaptor_type>(
upstream, [](std::size_t bytes, void* arg) { return false; }, nullptr);
} else if constexpr (std::is_same_v<adaptor_type, limiting_resource_adaptor<cuda_mr>>) {
return std::make_shared<adaptor_type>(upstream, 64_MiB);
} else if constexpr (std::is_same_v<adaptor_type, logging_resource_adaptor<cuda_mr>>) {
return std::make_shared<adaptor_type>(upstream, "rmm_adaptor_test_log.txt");
} else if constexpr (std::is_same_v<adaptor_type, owning_wrapper>) {
return mr::make_owning_wrapper<aligned_resource_adaptor>(std::make_shared<cuda_mr>());
} else {
return std::make_shared<adaptor_type>(upstream);
}
}
};
TYPED_TEST_CASE(AdaptorTest, adaptors);
TYPED_TEST(AdaptorTest, NullUpstream)
{
if constexpr (not std::is_same_v<TypeParam, owning_wrapper>) {
EXPECT_THROW(this->make_adaptor(nullptr), rmm::logic_error);
}
}
TYPED_TEST(AdaptorTest, Equality)
{
EXPECT_TRUE(this->mr->is_equal(*this->mr));
{
auto other_mr = this->make_adaptor(&this->cuda);
EXPECT_TRUE(this->mr->is_equal(*other_mr));
}
{
rmm::mr::device_memory_resource* device_mr = &this->cuda;
auto other_mr = aligned_resource_adaptor<rmm::mr::device_memory_resource>{device_mr};
EXPECT_FALSE(this->mr->is_equal(other_mr));
}
}
TYPED_TEST(AdaptorTest, GetUpstream)
{
if constexpr (std::is_same_v<TypeParam, owning_wrapper>) {
EXPECT_TRUE(this->mr->wrapped().get_upstream()->is_equal(this->cuda));
} else {
EXPECT_TRUE(this->mr->get_upstream()->is_equal(this->cuda));
}
}
TYPED_TEST(AdaptorTest, SupportsStreams)
{
EXPECT_EQ(this->mr->supports_streams(), this->cuda.supports_streams());
}
TYPED_TEST(AdaptorTest, MemInfo)
{
EXPECT_EQ(this->mr->supports_get_mem_info(), this->cuda.supports_get_mem_info());
auto [free, total] = this->mr->get_mem_info(rmm::cuda_stream_default);
if (this->mr->supports_get_mem_info()) {
EXPECT_NE(total, 0);
} else {
EXPECT_EQ(free, 0);
EXPECT_EQ(total, 0);
}
}
TYPED_TEST(AdaptorTest, AllocFree)
{
void* ptr{nullptr};
EXPECT_NO_THROW(ptr = this->mr->allocate(1024));
EXPECT_NE(ptr, nullptr);
EXPECT_NO_THROW(this->mr->deallocate(ptr, 1024));
}
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/cuda_async_view_mr_tests.cpp
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/cuda_device.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/cuda_async_view_memory_resource.hpp>
#include <cuda/memory_resource>
#include <gtest/gtest.h>
namespace rmm::test {
namespace {
using cuda_async_view_mr = rmm::mr::cuda_async_view_memory_resource;
static_assert(cuda::mr::resource_with<cuda_async_view_mr, cuda::mr::device_accessible>);
static_assert(cuda::mr::async_resource_with<cuda_async_view_mr, cuda::mr::device_accessible>);
#if defined(RMM_CUDA_MALLOC_ASYNC_SUPPORT)
TEST(PoolTest, UsePool)
{
cudaMemPool_t memPool{};
RMM_CUDA_TRY(rmm::detail::async_alloc::cudaDeviceGetDefaultMemPool(
&memPool, rmm::get_current_cuda_device().value()));
const auto pool_init_size{100};
cuda_async_view_mr mr{memPool};
void* ptr = mr.allocate(pool_init_size);
mr.deallocate(ptr, pool_init_size);
RMM_CUDA_TRY(cudaDeviceSynchronize());
}
TEST(PoolTest, NotTakingOwnershipOfPool)
{
cudaMemPoolProps poolProps = {};
poolProps.allocType = cudaMemAllocationTypePinned;
poolProps.location.id = rmm::get_current_cuda_device().value();
poolProps.location.type = cudaMemLocationTypeDevice;
cudaMemPool_t memPool{};
RMM_CUDA_TRY(rmm::detail::async_alloc::cudaMemPoolCreate(&memPool, &poolProps));
{
const auto pool_init_size{100};
cuda_async_view_mr mr{memPool};
void* ptr = mr.allocate(pool_init_size);
mr.deallocate(ptr, pool_init_size);
RMM_CUDA_TRY(cudaDeviceSynchronize());
}
auto destroy_valid_pool = [&]() {
auto result = rmm::detail::async_alloc::cudaMemPoolDestroy(memPool);
RMM_EXPECTS(result == cudaSuccess, "Pool wrapper did destroy pool");
};
EXPECT_NO_THROW(destroy_valid_pool());
}
TEST(PoolTest, ThrowIfNullptrPool)
{
auto construct_mr = []() {
cudaMemPool_t memPool{nullptr};
cuda_async_view_mr mr{memPool};
};
EXPECT_THROW(construct_mr(), rmm::logic_error);
}
#endif
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/statistics_mr_tests.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../byte_literals.hpp"
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/statistics_resource_adaptor.hpp>
#include <gtest/gtest.h>
namespace rmm::test {
namespace {
using statistics_adaptor = rmm::mr::statistics_resource_adaptor<rmm::mr::device_memory_resource>;
constexpr auto num_allocations{10};
constexpr auto num_more_allocations{5};
constexpr auto ten_MiB{10_MiB};
TEST(StatisticsTest, ThrowOnNullUpstream)
{
auto construct_nullptr = []() { statistics_adaptor mr{nullptr}; };
EXPECT_THROW(construct_nullptr(), rmm::logic_error);
}
TEST(StatisticsTest, Empty)
{
statistics_adaptor mr{rmm::mr::get_current_device_resource()};
EXPECT_EQ(mr.get_bytes_counter().peak, 0);
EXPECT_EQ(mr.get_bytes_counter().total, 0);
EXPECT_EQ(mr.get_bytes_counter().value, 0);
EXPECT_EQ(mr.get_allocations_counter().peak, 0);
EXPECT_EQ(mr.get_allocations_counter().total, 0);
EXPECT_EQ(mr.get_allocations_counter().value, 0);
}
TEST(StatisticsTest, AllFreed)
{
statistics_adaptor mr{rmm::mr::get_current_device_resource()};
std::vector<void*> allocations;
allocations.reserve(num_allocations);
for (int i = 0; i < num_allocations; ++i) {
allocations.push_back(mr.allocate(ten_MiB));
}
for (auto* alloc : allocations) {
mr.deallocate(alloc, ten_MiB);
}
// Counter values should be 0
EXPECT_EQ(mr.get_bytes_counter().value, 0);
EXPECT_EQ(mr.get_allocations_counter().value, 0);
}
TEST(StatisticsTest, PeakAllocations)
{
statistics_adaptor mr{rmm::mr::get_current_device_resource()};
std::vector<void*> allocations;
for (std::size_t i = 0; i < num_allocations; ++i) {
allocations.push_back(mr.allocate(ten_MiB));
}
// Delete every other allocation
for (auto&& it = allocations.begin(); it != allocations.end(); ++it) {
mr.deallocate(*it, ten_MiB);
it = allocations.erase(it);
}
auto current_alloc_counts = mr.get_allocations_counter();
auto current_alloc_bytes = mr.get_bytes_counter();
// Verify current allocations
EXPECT_EQ(current_alloc_bytes.value, 50_MiB);
EXPECT_EQ(current_alloc_counts.value, 5);
// Verify peak allocations
EXPECT_EQ(current_alloc_bytes.peak, 100_MiB);
EXPECT_EQ(current_alloc_counts.peak, 10);
// Verify total allocations
EXPECT_EQ(current_alloc_bytes.total, 100_MiB);
EXPECT_EQ(current_alloc_counts.total, 10);
// Add 10 more to increase the peak
for (std::size_t i = 0; i < num_allocations; ++i) {
allocations.push_back(mr.allocate(ten_MiB));
}
// Deallocate all remaining
for (auto& allocation : allocations) {
mr.deallocate(allocation, ten_MiB);
}
allocations.clear();
current_alloc_counts = mr.get_allocations_counter();
current_alloc_bytes = mr.get_bytes_counter();
// Verify current allocations
EXPECT_EQ(current_alloc_bytes.value, 0);
EXPECT_EQ(current_alloc_counts.value, 0);
// Verify peak allocations
EXPECT_EQ(current_alloc_bytes.peak, 150_MiB);
EXPECT_EQ(current_alloc_counts.peak, 15);
// Verify total allocations
EXPECT_EQ(current_alloc_bytes.total, 200_MiB);
EXPECT_EQ(current_alloc_counts.total, 20);
}
TEST(StatisticsTest, MultiTracking)
{
statistics_adaptor mr{rmm::mr::get_current_device_resource()};
rmm::mr::set_current_device_resource(&mr);
std::vector<std::shared_ptr<rmm::device_buffer>> allocations;
for (std::size_t i = 0; i < num_allocations; ++i) {
allocations.emplace_back(
std::make_shared<rmm::device_buffer>(ten_MiB, rmm::cuda_stream_default));
}
EXPECT_EQ(mr.get_allocations_counter().value, 10);
statistics_adaptor inner_mr{rmm::mr::get_current_device_resource()};
rmm::mr::set_current_device_resource(&inner_mr);
for (std::size_t i = 0; i < num_more_allocations; ++i) {
allocations.emplace_back(
std::make_shared<rmm::device_buffer>(ten_MiB, rmm::cuda_stream_default));
}
// Check the allocated bytes for both MRs
EXPECT_EQ(mr.get_allocations_counter().value, 15);
EXPECT_EQ(inner_mr.get_allocations_counter().value, 5);
EXPECT_EQ(mr.get_bytes_counter().value, 150_MiB);
EXPECT_EQ(inner_mr.get_bytes_counter().value, 50_MiB);
// Clear the allocations, causing all memory to be freed
allocations.clear();
// The current allocations for both MRs should be 0
EXPECT_EQ(mr.get_allocations_counter().value, 0);
EXPECT_EQ(inner_mr.get_allocations_counter().value, 0);
EXPECT_EQ(mr.get_bytes_counter().value, 0);
EXPECT_EQ(inner_mr.get_bytes_counter().value, 0);
// Finally, verify the peak and total values
EXPECT_EQ(mr.get_bytes_counter().peak, 150_MiB);
EXPECT_EQ(inner_mr.get_bytes_counter().peak, 50_MiB);
EXPECT_EQ(mr.get_allocations_counter().peak, 15);
EXPECT_EQ(inner_mr.get_allocations_counter().peak, 5);
// Reset the current device resource
rmm::mr::set_current_device_resource(mr.get_upstream());
}
TEST(StatisticsTest, NegativeInnerTracking)
{
// This tests the unlikely scenario where pointers are deallocated on an inner
// wrapped memory resource. This can happen if the MR is not saved with the
// memory pointer
statistics_adaptor mr{rmm::mr::get_current_device_resource()};
std::vector<void*> allocations;
for (std::size_t i = 0; i < num_allocations; ++i) {
allocations.push_back(mr.allocate(ten_MiB));
}
EXPECT_EQ(mr.get_allocations_counter().value, 10);
statistics_adaptor inner_mr{&mr};
// Add more allocations
for (std::size_t i = 0; i < num_more_allocations; ++i) {
allocations.push_back(inner_mr.allocate(ten_MiB));
}
// Check the outstanding allocations
EXPECT_EQ(mr.get_allocations_counter().value, 15);
EXPECT_EQ(inner_mr.get_allocations_counter().value, 5);
// Check the current counts
EXPECT_EQ(mr.get_bytes_counter().value, 150_MiB);
EXPECT_EQ(inner_mr.get_bytes_counter().value, 50_MiB);
EXPECT_EQ(mr.get_allocations_counter().value, 15);
EXPECT_EQ(inner_mr.get_allocations_counter().value, 5);
// Deallocate all allocations using the inner_mr
for (auto& allocation : allocations) {
inner_mr.deallocate(allocation, ten_MiB);
}
allocations.clear();
// Check the current counts are 0 for the outer
EXPECT_EQ(mr.get_bytes_counter().value, 0);
EXPECT_EQ(mr.get_allocations_counter().value, 0);
// The inner_mr will have negative values
EXPECT_EQ(inner_mr.get_bytes_counter().value, -100_MiB);
EXPECT_EQ(inner_mr.get_allocations_counter().value, -10);
// Verify the peak and total
EXPECT_EQ(mr.get_bytes_counter().peak, 150_MiB);
EXPECT_EQ(inner_mr.get_bytes_counter().peak, 50_MiB);
EXPECT_EQ(mr.get_allocations_counter().peak, 15);
EXPECT_EQ(inner_mr.get_allocations_counter().peak, 5);
EXPECT_EQ(mr.get_bytes_counter().total, 150_MiB);
EXPECT_EQ(inner_mr.get_bytes_counter().total, 50_MiB);
EXPECT_EQ(mr.get_allocations_counter().total, 15);
EXPECT_EQ(inner_mr.get_allocations_counter().total, 5);
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/mr_ref_test.hpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "../../byte_literals.hpp"
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/aligned.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/binning_memory_resource.hpp>
#include <rmm/mr/device/cuda_async_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/fixed_size_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <gtest/gtest.h>
#include <cuda_runtime_api.h>
#include <cuda/memory_resource>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <random>
#include <utility>
using resource_ref = cuda::mr::resource_ref<cuda::mr::device_accessible>;
using async_resource_ref = cuda::mr::async_resource_ref<cuda::mr::device_accessible>;
namespace rmm::test {
/**
* @brief Returns if a pointer points to a device memory or managed memory
* allocation.
*/
inline bool is_device_memory(void* ptr)
{
cudaPointerAttributes attributes{};
if (cudaSuccess != cudaPointerGetAttributes(&attributes, ptr)) { return false; }
return (attributes.type == cudaMemoryTypeDevice) or (attributes.type == cudaMemoryTypeManaged);
}
enum size_in_bytes : size_t {};
constexpr auto default_num_allocations{100};
constexpr size_in_bytes default_max_size{5_MiB};
struct allocation {
void* ptr{nullptr};
std::size_t size{0};
allocation(void* ptr, std::size_t size) : ptr{ptr}, size{size} {}
allocation() = default;
};
// Various test functions, shared between single-threaded and multithreaded tests.
inline void test_allocate(resource_ref ref, std::size_t bytes)
{
try {
void* ptr = ref.allocate(bytes);
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(ptr));
EXPECT_TRUE(is_device_memory(ptr));
ref.deallocate(ptr, bytes);
} catch (rmm::out_of_memory const& e) {
EXPECT_NE(std::string{e.what()}.find("out_of_memory"), std::string::npos);
}
}
inline void test_allocate_async(async_resource_ref ref,
std::size_t bytes,
cuda_stream_view stream = {})
{
try {
void* ptr = ref.allocate_async(bytes, stream);
if (not stream.is_default()) { stream.synchronize(); }
EXPECT_NE(nullptr, ptr);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(ptr));
EXPECT_TRUE(is_device_memory(ptr));
ref.deallocate_async(ptr, bytes, stream);
if (not stream.is_default()) { stream.synchronize(); }
} catch (rmm::out_of_memory const& e) {
EXPECT_NE(std::string{e.what()}.find("out_of_memory"), std::string::npos);
}
}
// Simple reproducer for https://github.com/rapidsai/rmm/issues/861
inline void concurrent_allocations_are_different(resource_ref ref)
{
const auto size{8_B};
void* ptr1 = ref.allocate(size);
void* ptr2 = ref.allocate(size);
EXPECT_NE(ptr1, ptr2);
ref.deallocate(ptr1, size);
ref.deallocate(ptr2, size);
}
inline void concurrent_async_allocations_are_different(async_resource_ref ref,
cuda_stream_view stream)
{
const auto size{8_B};
void* ptr1 = ref.allocate_async(size, stream);
void* ptr2 = ref.allocate_async(size, stream);
EXPECT_NE(ptr1, ptr2);
ref.deallocate_async(ptr1, size, stream);
ref.deallocate_async(ptr2, size, stream);
}
inline void test_various_allocations(resource_ref ref)
{
// test allocating zero bytes on non-default stream
{
void* ptr = ref.allocate(0);
EXPECT_NO_THROW(ref.deallocate(ptr, 0));
}
test_allocate(ref, 4_B);
test_allocate(ref, 1_KiB);
test_allocate(ref, 1_MiB);
test_allocate(ref, 1_GiB);
// should fail to allocate too much
{
void* ptr{nullptr};
EXPECT_THROW(ptr = ref.allocate(1_PiB), rmm::out_of_memory);
EXPECT_EQ(nullptr, ptr);
// test e.what();
try {
ptr = ref.allocate(1_PiB);
} catch (rmm::out_of_memory const& e) {
EXPECT_NE(std::string{e.what()}.find("out_of_memory"), std::string::npos);
}
}
}
inline void test_various_async_allocations(async_resource_ref ref, cuda_stream_view stream)
{
// test allocating zero bytes on non-default stream
{
void* ptr = ref.allocate_async(0, stream);
stream.synchronize();
EXPECT_NO_THROW(ref.deallocate_async(ptr, 0, stream));
stream.synchronize();
}
test_allocate_async(ref, 4_B, stream);
test_allocate_async(ref, 1_KiB, stream);
test_allocate_async(ref, 1_MiB, stream);
test_allocate_async(ref, 1_GiB, stream);
// should fail to allocate too much
{
void* ptr{nullptr};
EXPECT_THROW(ptr = ref.allocate_async(1_PiB, stream), rmm::out_of_memory);
EXPECT_EQ(nullptr, ptr);
// test e.what();
try {
ptr = ref.allocate_async(1_PiB, stream);
} catch (rmm::out_of_memory const& e) {
EXPECT_NE(std::string{e.what()}.find("out_of_memory"), std::string::npos);
}
}
}
inline void test_random_allocations(resource_ref ref,
std::size_t num_allocations = default_num_allocations,
size_in_bytes max_size = default_max_size)
{
std::vector<allocation> allocations(num_allocations);
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> distribution(1, max_size);
// num_allocations allocations from [0,max_size)
std::for_each(
allocations.begin(), allocations.end(), [&generator, &distribution, &ref](allocation& alloc) {
alloc.size = distribution(generator);
EXPECT_NO_THROW(alloc.ptr = ref.allocate(alloc.size));
EXPECT_NE(nullptr, alloc.ptr);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(alloc.ptr));
});
std::for_each(allocations.begin(), allocations.end(), [&ref](allocation& alloc) {
EXPECT_NO_THROW(ref.deallocate(alloc.ptr, alloc.size));
});
}
inline void test_random_async_allocations(async_resource_ref ref,
std::size_t num_allocations = default_num_allocations,
size_in_bytes max_size = default_max_size,
cuda_stream_view stream = {})
{
std::vector<allocation> allocations(num_allocations);
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> distribution(1, max_size);
// num_allocations allocations from [0,max_size)
std::for_each(allocations.begin(),
allocations.end(),
[&generator, &distribution, &ref, stream](allocation& alloc) {
alloc.size = distribution(generator);
EXPECT_NO_THROW(alloc.ptr = ref.allocate(alloc.size));
if (not stream.is_default()) { stream.synchronize(); }
EXPECT_NE(nullptr, alloc.ptr);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(alloc.ptr));
});
std::for_each(allocations.begin(), allocations.end(), [stream, &ref](allocation& alloc) {
EXPECT_NO_THROW(ref.deallocate(alloc.ptr, alloc.size));
if (not stream.is_default()) { stream.synchronize(); }
});
}
inline void test_mixed_random_allocation_free(resource_ref ref,
size_in_bytes max_size = default_max_size)
{
std::default_random_engine generator;
constexpr std::size_t num_allocations{100};
std::uniform_int_distribution<std::size_t> size_distribution(1, max_size);
constexpr int allocation_probability{53}; // percent
constexpr int max_probability{99};
std::uniform_int_distribution<int> op_distribution(0, max_probability);
std::uniform_int_distribution<int> index_distribution(0, num_allocations - 1);
std::size_t active_allocations{0};
std::size_t allocation_count{0};
std::vector<allocation> allocations;
for (std::size_t i = 0; i < num_allocations * 2; ++i) {
bool do_alloc = true;
if (active_allocations > 0) {
int chance = op_distribution(generator);
do_alloc = (chance < allocation_probability) && (allocation_count < num_allocations);
}
if (do_alloc) {
std::size_t size = size_distribution(generator);
active_allocations++;
allocation_count++;
EXPECT_NO_THROW(allocations.emplace_back(ref.allocate(size), size));
auto new_allocation = allocations.back();
EXPECT_NE(nullptr, new_allocation.ptr);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(new_allocation.ptr));
} else {
auto const index = static_cast<int>(index_distribution(generator) % active_allocations);
active_allocations--;
allocation to_free = allocations[index];
allocations.erase(std::next(allocations.begin(), index));
EXPECT_NO_THROW(ref.deallocate(to_free.ptr, to_free.size));
}
}
EXPECT_EQ(active_allocations, 0);
EXPECT_EQ(allocations.size(), active_allocations);
}
inline void test_mixed_random_async_allocation_free(async_resource_ref ref,
size_in_bytes max_size = default_max_size,
cuda_stream_view stream = {})
{
std::default_random_engine generator;
constexpr std::size_t num_allocations{100};
std::uniform_int_distribution<std::size_t> size_distribution(1, max_size);
constexpr int allocation_probability{53}; // percent
constexpr int max_probability{99};
std::uniform_int_distribution<int> op_distribution(0, max_probability);
std::uniform_int_distribution<int> index_distribution(0, num_allocations - 1);
std::size_t active_allocations{0};
std::size_t allocation_count{0};
std::vector<allocation> allocations;
for (std::size_t i = 0; i < num_allocations * 2; ++i) {
bool do_alloc = true;
if (active_allocations > 0) {
int chance = op_distribution(generator);
do_alloc = (chance < allocation_probability) && (allocation_count < num_allocations);
}
if (do_alloc) {
std::size_t size = size_distribution(generator);
active_allocations++;
allocation_count++;
EXPECT_NO_THROW(allocations.emplace_back(ref.allocate_async(size, stream), size));
auto new_allocation = allocations.back();
EXPECT_NE(nullptr, new_allocation.ptr);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(new_allocation.ptr));
} else {
auto const index = static_cast<int>(index_distribution(generator) % active_allocations);
active_allocations--;
allocation to_free = allocations[index];
allocations.erase(std::next(allocations.begin(), index));
EXPECT_NO_THROW(ref.deallocate_async(to_free.ptr, to_free.size, stream));
}
}
EXPECT_EQ(active_allocations, 0);
EXPECT_EQ(allocations.size(), active_allocations);
}
using MRFactoryFunc = std::function<std::shared_ptr<rmm::mr::device_memory_resource>()>;
/// Encapsulates a `device_memory_resource` factory function and associated name
struct mr_factory {
mr_factory(std::string name, MRFactoryFunc factory)
: name{std::move(name)}, factory{std::move(factory)}
{
}
std::string name; ///< Name to associate with tests that use this factory
MRFactoryFunc factory; ///< Factory function that returns shared_ptr to `device_memory_resource`
///< instance to use in test
};
/// Test fixture class value-parameterized on different `mr_factory`s
struct mr_ref_test : public ::testing::TestWithParam<mr_factory> {
void SetUp() override
{
auto factory = GetParam().factory;
mr = factory();
if (mr == nullptr) {
GTEST_SKIP() << "Skipping tests since the memory resource is not supported with this CUDA "
<< "driver/runtime version";
}
ref = async_resource_ref{*mr};
}
std::shared_ptr<rmm::mr::device_memory_resource> mr; ///< Pointer to resource to use in tests
async_resource_ref ref{*mr};
rmm::cuda_stream stream{};
};
struct mr_ref_allocation_test : public mr_ref_test {};
/// MR factory functions
inline auto make_cuda() { return std::make_shared<rmm::mr::cuda_memory_resource>(); }
inline auto make_cuda_async()
{
if (rmm::detail::async_alloc::is_supported()) {
return std::make_shared<rmm::mr::cuda_async_memory_resource>();
}
return std::shared_ptr<rmm::mr::cuda_async_memory_resource>{nullptr};
}
inline auto make_managed() { return std::make_shared<rmm::mr::managed_memory_resource>(); }
inline auto make_pool()
{
return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda());
}
inline auto make_arena()
{
return rmm::mr::make_owning_wrapper<rmm::mr::arena_memory_resource>(make_cuda());
}
inline auto make_fixed_size()
{
return rmm::mr::make_owning_wrapper<rmm::mr::fixed_size_memory_resource>(make_cuda());
}
inline auto make_binning()
{
auto pool = make_pool();
// Add a binning_memory_resource with fixed-size bins of sizes 256, 512, 1024, 2048 and 4096KiB
// Larger allocations will use the pool resource
auto const bin_range_start{18};
auto const bin_range_end{22};
auto mr = rmm::mr::make_owning_wrapper<rmm::mr::binning_memory_resource>(
pool, bin_range_start, bin_range_end);
return mr;
}
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/pool_mr_tests.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/cuda_device.hpp>
#include <rmm/detail/aligned.hpp>
#include <rmm/detail/cuda_util.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/limiting_resource_adaptor.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <gtest/gtest.h>
// explicit instantiation for test coverage purposes
template class rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>;
namespace rmm::test {
namespace {
using cuda_mr = rmm::mr::cuda_memory_resource;
using pool_mr = rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>;
using limiting_mr = rmm::mr::limiting_resource_adaptor<rmm::mr::cuda_memory_resource>;
TEST(PoolTest, ThrowOnNullUpstream)
{
auto construct_nullptr = []() { pool_mr mr{nullptr}; };
EXPECT_THROW(construct_nullptr(), rmm::logic_error);
}
TEST(PoolTest, ThrowMaxLessThanInitial)
{
// Make sure first argument is enough larger than the second that alignment rounding doesn't
// make them equal
auto max_less_than_initial = []() {
const auto initial{1024};
const auto maximum{256};
pool_mr mr{rmm::mr::get_current_device_resource(), initial, maximum};
};
EXPECT_THROW(max_less_than_initial(), rmm::logic_error);
}
TEST(PoolTest, ReferenceThrowMaxLessThanInitial)
{
// Make sure first argument is enough larger than the second that alignment rounding doesn't
// make them equal
auto max_less_than_initial = []() {
const auto initial{1024};
const auto maximum{256};
pool_mr mr{*rmm::mr::get_current_device_resource(), initial, maximum};
};
EXPECT_THROW(max_less_than_initial(), rmm::logic_error);
}
TEST(PoolTest, AllocateNinetyPercent)
{
auto allocate_ninety = []() {
auto const [free, total] = rmm::detail::available_device_memory();
(void)total;
auto const ninety_percent_pool =
rmm::detail::align_up(static_cast<std::size_t>(static_cast<double>(free) * 0.9),
rmm::detail::CUDA_ALLOCATION_ALIGNMENT);
pool_mr mr{rmm::mr::get_current_device_resource(), ninety_percent_pool};
};
EXPECT_NO_THROW(allocate_ninety());
}
TEST(PoolTest, TwoLargeBuffers)
{
auto two_large = []() {
auto const [free, total] = rmm::detail::available_device_memory();
(void)total;
pool_mr mr{rmm::mr::get_current_device_resource()};
auto* ptr1 = mr.allocate(free / 4);
auto* ptr2 = mr.allocate(free / 4);
mr.deallocate(ptr1, free / 4);
mr.deallocate(ptr2, free / 4);
};
EXPECT_NO_THROW(two_large());
}
TEST(PoolTest, ForceGrowth)
{
cuda_mr cuda;
{
auto const max_size{6000};
limiting_mr limiter{&cuda, max_size};
pool_mr mr{&limiter, 0};
EXPECT_NO_THROW(mr.allocate(1000));
EXPECT_NO_THROW(mr.allocate(4000));
EXPECT_NO_THROW(mr.allocate(500));
EXPECT_THROW(mr.allocate(2000), rmm::out_of_memory); // too much
}
{
// with max pool size
auto const max_size{6000};
limiting_mr limiter{&cuda, max_size};
pool_mr mr{&limiter, 0, 8192};
EXPECT_NO_THROW(mr.allocate(1000));
EXPECT_THROW(mr.allocate(4000), rmm::out_of_memory); // too much
EXPECT_NO_THROW(mr.allocate(500));
EXPECT_NO_THROW(mr.allocate(2000)); // fits
}
}
TEST(PoolTest, DeletedStream)
{
pool_mr mr{rmm::mr::get_current_device_resource(), 0};
cudaStream_t stream{}; // we don't use rmm::cuda_stream here to make destruction more explicit
const int size = 10000;
EXPECT_EQ(cudaSuccess, cudaStreamCreate(&stream));
EXPECT_NO_THROW(rmm::device_buffer buff(size, cuda_stream_view{stream}, &mr));
EXPECT_EQ(cudaSuccess, cudaStreamDestroy(stream));
EXPECT_NO_THROW(mr.allocate(size));
}
// Issue #527
TEST(PoolTest, InitialAndMaxPoolSizeEqual)
{
EXPECT_NO_THROW([]() {
pool_mr mr(rmm::mr::get_current_device_resource(), 1000192, 1000192);
mr.allocate(1000);
}());
}
TEST(PoolTest, NonAlignedPoolSize)
{
EXPECT_THROW(
[]() {
pool_mr mr(rmm::mr::get_current_device_resource(), 1000031, 1000192);
mr.allocate(1000);
}(),
rmm::logic_error);
EXPECT_THROW(
[]() {
pool_mr mr(rmm::mr::get_current_device_resource(), 1000192, 1000200);
mr.allocate(1000);
}(),
rmm::logic_error);
}
TEST(PoolTest, UpstreamDoesntSupportMemInfo)
{
cuda_mr cuda;
pool_mr mr1(&cuda);
pool_mr mr2(&mr1);
auto* ptr = mr2.allocate(1024);
mr2.deallocate(ptr, 1024);
}
TEST(PoolTest, MultidevicePool)
{
using MemoryResource = rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>;
// Get the number of cuda devices
int num_devices = rmm::get_num_cuda_devices();
// only run on multidevice systems
if (num_devices >= 2) {
rmm::mr::cuda_memory_resource general_mr;
// initializing pool_memory_resource of multiple devices
int devices = 2;
size_t pool_size = 1024;
std::vector<std::shared_ptr<MemoryResource>> mrs;
for (int i = 0; i < devices; ++i) {
RMM_CUDA_TRY(cudaSetDevice(i));
auto mr = std::make_shared<MemoryResource>(&general_mr, pool_size, pool_size);
rmm::mr::set_per_device_resource(rmm::cuda_device_id{i}, mr.get());
mrs.emplace_back(mr);
}
{
RMM_CUDA_TRY(cudaSetDevice(0));
rmm::device_buffer buf_a(16, rmm::cuda_stream_per_thread, mrs[0].get());
{
RMM_CUDA_TRY(cudaSetDevice(1));
rmm::device_buffer buf_b(16, rmm::cuda_stream_per_thread, mrs[1].get());
}
RMM_CUDA_TRY(cudaSetDevice(0));
}
}
}
} // namespace
namespace test_properties {
class fake_async_resource {
public:
// To model `async_resource`
void* allocate(std::size_t, std::size_t) { return nullptr; }
void deallocate(void* ptr, std::size_t, std::size_t) {}
void* allocate_async(std::size_t, std::size_t, cuda::stream_ref) { return nullptr; }
void deallocate_async(void* ptr, std::size_t, std::size_t, cuda::stream_ref) {}
bool operator==(const fake_async_resource& other) const { return true; }
bool operator!=(const fake_async_resource& other) const { return false; }
// To model stream_resource
[[nodiscard]] bool supports_streams() const noexcept { return false; }
[[nodiscard]] bool supports_get_mem_info() const noexcept { return false; }
private:
void* do_allocate(std::size_t bytes, cuda_stream_view) { return nullptr; }
void do_deallocate(void* ptr, std::size_t, cuda_stream_view) {}
[[nodiscard]] bool do_is_equal(fake_async_resource const& other) const noexcept { return true; }
};
static_assert(!cuda::has_property<fake_async_resource, cuda::mr::device_accessible>);
static_assert(!cuda::has_property<rmm::mr::pool_memory_resource<fake_async_resource>,
cuda::mr::device_accessible>);
// Ensure that we forward the property if it is there
class fake_async_resource_device_accessible : public fake_async_resource {
friend void get_property(const fake_async_resource_device_accessible&,
cuda::mr::device_accessible)
{
}
};
static_assert(
cuda::has_property<fake_async_resource_device_accessible, cuda::mr::device_accessible>);
static_assert(
cuda::has_property<rmm::mr::pool_memory_resource<fake_async_resource_device_accessible>,
cuda::mr::device_accessible>);
} // namespace test_properties
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/aligned_mr_tests.cpp
|
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../mock_resource.hpp"
#include <rmm/detail/aligned.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/aligned_resource_adaptor.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace rmm::test {
namespace {
using ::testing::Return;
using aligned_mock = rmm::mr::aligned_resource_adaptor<mock_resource>;
using aligned_real = rmm::mr::aligned_resource_adaptor<rmm::mr::device_memory_resource>;
void* int_to_address(std::size_t val)
{
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast, performance-no-int-to-ptr)
return reinterpret_cast<void*>(val);
}
TEST(AlignedTest, ThrowOnNullUpstream)
{
auto construct_nullptr = []() { aligned_mock mr{nullptr}; };
EXPECT_THROW(construct_nullptr(), rmm::logic_error);
}
TEST(AlignedTest, ThrowOnInvalidAllocationAlignment)
{
mock_resource mock;
auto construct_alignment = [](auto* memres, std::size_t align) {
aligned_mock mr{memres, align};
};
EXPECT_THROW(construct_alignment(&mock, 255), rmm::logic_error);
EXPECT_NO_THROW(construct_alignment(&mock, 256));
EXPECT_THROW(construct_alignment(&mock, 768), rmm::logic_error);
}
TEST(AlignedTest, SupportsStreams)
{
mock_resource mock;
aligned_mock mr{&mock};
EXPECT_CALL(mock, supports_streams()).WillOnce(Return(true));
EXPECT_TRUE(mr.supports_streams());
EXPECT_CALL(mock, supports_streams()).WillOnce(Return(false));
EXPECT_FALSE(mr.supports_streams());
}
TEST(AlignedTest, SupportsGetMemInfo)
{
mock_resource mock;
aligned_mock mr{&mock};
EXPECT_CALL(mock, supports_get_mem_info()).WillOnce(Return(true));
EXPECT_TRUE(mr.supports_get_mem_info());
EXPECT_CALL(mock, supports_get_mem_info()).WillOnce(Return(false));
EXPECT_FALSE(mr.supports_get_mem_info());
}
TEST(AlignedTest, DefaultAllocationAlignmentPassthrough)
{
mock_resource mock;
aligned_mock mr{&mock};
cuda_stream_view stream;
void* const pointer = int_to_address(123);
{
auto const size{5};
EXPECT_CALL(mock, do_allocate(size, stream)).WillOnce(Return(pointer));
EXPECT_CALL(mock, do_deallocate(pointer, size, stream)).Times(1);
}
{
auto const size{5};
EXPECT_EQ(mr.allocate(size, stream), pointer);
mr.deallocate(pointer, size, stream);
}
}
TEST(AlignedTest, BelowAlignmentThresholdPassthrough)
{
mock_resource mock;
auto const alignment{4096};
auto const threshold{65536};
aligned_mock mr{&mock, alignment, threshold};
cuda_stream_view stream;
void* const pointer = int_to_address(123);
{
auto const size{3};
EXPECT_CALL(mock, do_allocate(size, stream)).WillOnce(Return(pointer));
EXPECT_CALL(mock, do_deallocate(pointer, size, stream)).Times(1);
}
{
auto const size{3};
EXPECT_EQ(mr.allocate(size, stream), pointer);
mr.deallocate(pointer, size, stream);
}
{
auto const size{65528};
void* const pointer1 = int_to_address(456);
EXPECT_CALL(mock, do_allocate(size, stream)).WillOnce(Return(pointer1));
EXPECT_CALL(mock, do_deallocate(pointer1, size, stream)).Times(1);
EXPECT_EQ(mr.allocate(size, stream), pointer1);
mr.deallocate(pointer1, size, stream);
}
}
TEST(AlignedTest, UpstreamAddressAlreadyAligned)
{
mock_resource mock;
auto const alignment{4096};
auto const threshold{65536};
aligned_mock mr{&mock, alignment, threshold};
cuda_stream_view stream;
void* const pointer = int_to_address(4096);
{
auto const size{69376};
EXPECT_CALL(mock, do_allocate(size, stream)).WillOnce(Return(pointer));
EXPECT_CALL(mock, do_deallocate(pointer, size, stream)).Times(1);
}
{
auto const size{65536};
EXPECT_EQ(mr.allocate(size, stream), pointer);
mr.deallocate(pointer, size, stream);
}
}
TEST(AlignedTest, AlignUpstreamAddress)
{
mock_resource mock;
auto const alignment{4096};
auto const threshold{65536};
aligned_mock mr{&mock, alignment, threshold};
cuda_stream_view stream;
{
void* const pointer = int_to_address(256);
auto const size{69376};
EXPECT_CALL(mock, do_allocate(size, stream)).WillOnce(Return(pointer));
EXPECT_CALL(mock, do_deallocate(pointer, size, stream)).Times(1);
}
{
void* const expected_pointer = int_to_address(4096);
auto const size{65536};
EXPECT_EQ(mr.allocate(size, stream), expected_pointer);
mr.deallocate(expected_pointer, size, stream);
}
}
TEST(AlignedTest, AlignMultiple)
{
mock_resource mock;
auto const alignment{4096};
auto const threshold{65536};
aligned_mock mr{&mock, alignment, threshold};
cuda_stream_view stream;
{
void* const pointer1 = int_to_address(256);
void* const pointer2 = int_to_address(131584);
void* const pointer3 = int_to_address(263168);
auto const size1{69376};
auto const size2{77568};
auto const size3{81664};
EXPECT_CALL(mock, do_allocate(size1, stream)).WillOnce(Return(pointer1));
EXPECT_CALL(mock, do_allocate(size2, stream)).WillOnce(Return(pointer2));
EXPECT_CALL(mock, do_allocate(size3, stream)).WillOnce(Return(pointer3));
EXPECT_CALL(mock, do_deallocate(pointer1, size1, stream)).Times(1);
EXPECT_CALL(mock, do_deallocate(pointer2, size2, stream)).Times(1);
EXPECT_CALL(mock, do_deallocate(pointer3, size3, stream)).Times(1);
}
{
void* const expected_pointer1 = int_to_address(4096);
void* const expected_pointer2 = int_to_address(135168);
void* const expected_pointer3 = int_to_address(266240);
auto const size1{65536};
auto const size2{73728};
auto const size3{77800};
EXPECT_EQ(mr.allocate(size1, stream), expected_pointer1);
EXPECT_EQ(mr.allocate(size2, stream), expected_pointer2);
EXPECT_EQ(mr.allocate(size3, stream), expected_pointer3);
mr.deallocate(expected_pointer1, size1, stream);
mr.deallocate(expected_pointer2, size2, stream);
mr.deallocate(expected_pointer3, size3, stream);
}
}
TEST(AlignedTest, AlignRealPointer)
{
auto const alignment{4096};
auto const threshold{65536};
aligned_real mr{rmm::mr::get_current_device_resource(), alignment, threshold};
void* alloc = mr.allocate(threshold);
EXPECT_TRUE(rmm::detail::is_pointer_aligned(alloc, alignment));
mr.deallocate(alloc, threshold);
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/mr_ref_multithreaded_tests.cpp
|
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mr_ref_test.hpp"
#include <gtest/gtest.h>
#include <rmm/cuda_stream.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <cuda/memory_resource>
#include <thread>
#include <vector>
namespace rmm::test {
namespace {
struct mr_ref_test_mt : public mr_ref_test {};
INSTANTIATE_TEST_CASE_P(MultiThreadResourceTests,
mr_ref_test_mt,
::testing::Values(mr_factory{"CUDA", &make_cuda},
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
mr_factory{"CUDA_Async", &make_cuda_async},
#endif
mr_factory{"Managed", &make_managed},
mr_factory{"Pool", &make_pool},
mr_factory{"Arena", &make_arena},
mr_factory{"Binning", &make_binning}),
[](auto const& info) { return info.param.name; });
template <typename Task, typename... Arguments>
void spawn_n(std::size_t num_threads, Task task, Arguments&&... args)
{
std::vector<std::thread> threads;
threads.reserve(num_threads);
for (std::size_t i = 0; i < num_threads; ++i) {
threads.emplace_back(std::thread(task, std::forward<Arguments>(args)...));
}
for (auto& thread : threads) {
thread.join();
}
}
template <typename Task, typename... Arguments>
void spawn(Task task, Arguments&&... args)
{
spawn_n(4, task, std::forward<Arguments>(args)...);
}
TEST_P(mr_ref_test_mt, Allocate) { spawn(test_various_allocations, this->ref); }
TEST_P(mr_ref_test_mt, AllocateDefaultStream)
{
spawn(test_various_async_allocations, this->ref, rmm::cuda_stream_view{});
}
TEST_P(mr_ref_test_mt, AllocateOnStream)
{
spawn(test_various_async_allocations, this->ref, this->stream.view());
}
TEST_P(mr_ref_test_mt, RandomAllocations)
{
spawn(test_random_allocations, this->ref, default_num_allocations, default_max_size);
}
TEST_P(mr_ref_test_mt, RandomAllocationsDefaultStream)
{
spawn(test_random_async_allocations,
this->ref,
default_num_allocations,
default_max_size,
rmm::cuda_stream_view{});
}
TEST_P(mr_ref_test_mt, RandomAllocationsStream)
{
spawn(test_random_async_allocations,
this->ref,
default_num_allocations,
default_max_size,
this->stream.view());
}
TEST_P(mr_ref_test_mt, MixedRandomAllocationFree)
{
spawn(test_mixed_random_allocation_free, this->ref, default_max_size);
}
TEST_P(mr_ref_test_mt, MixedRandomAllocationFreeDefaultStream)
{
spawn(
test_mixed_random_async_allocation_free, this->ref, default_max_size, rmm::cuda_stream_view{});
}
TEST_P(mr_ref_test_mt, MixedRandomAllocationFreeStream)
{
spawn(test_mixed_random_async_allocation_free, this->ref, default_max_size, this->stream.view());
}
void allocate_async_loop(async_resource_ref ref,
std::size_t num_allocations,
std::list<allocation>& allocations,
std::mutex& mtx,
std::condition_variable& allocations_ready,
cudaEvent_t& event,
rmm::cuda_stream_view stream)
{
constexpr std::size_t max_size{1_MiB};
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> size_distribution(1, max_size);
for (std::size_t i = 0; i < num_allocations; ++i) {
std::size_t size = size_distribution(generator);
void* ptr = ref.allocate_async(size, stream);
{
std::lock_guard<std::mutex> lock(mtx);
RMM_CUDA_TRY(cudaEventRecord(event, stream.value()));
allocations.emplace_back(ptr, size);
}
allocations_ready.notify_one();
}
// Work around for threads going away before cudaEvent has finished async processing
cudaEventSynchronize(event);
}
void deallocate_async_loop(async_resource_ref ref,
std::size_t num_allocations,
std::list<allocation>& allocations,
std::mutex& mtx,
std::condition_variable& allocations_ready,
cudaEvent_t& event,
rmm::cuda_stream_view stream)
{
for (std::size_t i = 0; i < num_allocations; i++) {
std::unique_lock lock(mtx);
allocations_ready.wait(lock, [&allocations] { return !allocations.empty(); });
RMM_CUDA_TRY(cudaStreamWaitEvent(stream.value(), event));
allocation alloc = allocations.front();
allocations.pop_front();
ref.deallocate_async(alloc.ptr, alloc.size, stream);
}
// Work around for threads going away before cudaEvent has finished async processing
cudaEventSynchronize(event);
}
void test_allocate_async_free_different_threads(async_resource_ref ref,
rmm::cuda_stream_view streamA,
rmm::cuda_stream_view streamB)
{
constexpr std::size_t num_allocations{100};
std::mutex mtx;
std::condition_variable allocations_ready;
std::list<allocation> allocations;
cudaEvent_t event;
RMM_CUDA_TRY(cudaEventCreate(&event));
std::thread producer(allocate_async_loop,
ref,
num_allocations,
std::ref(allocations),
std::ref(mtx),
std::ref(allocations_ready),
std::ref(event),
streamA);
std::thread consumer(deallocate_async_loop,
ref,
num_allocations,
std::ref(allocations),
std::ref(mtx),
std::ref(allocations_ready),
std::ref(event),
streamB);
producer.join();
consumer.join();
RMM_CUDA_TRY(cudaEventDestroy(event));
}
TEST_P(mr_ref_test_mt, AllocFreeDifferentThreadsDefaultStream)
{
test_allocate_async_free_different_threads(
this->ref, rmm::cuda_stream_default, rmm::cuda_stream_default);
}
TEST_P(mr_ref_test_mt, AllocFreeDifferentThreadsPerThreadDefaultStream)
{
test_allocate_async_free_different_threads(
this->ref, rmm::cuda_stream_per_thread, rmm::cuda_stream_per_thread);
}
TEST_P(mr_ref_test_mt, AllocFreeDifferentThreadsSameStream)
{
test_allocate_async_free_different_threads(this->ref, this->stream, this->stream);
}
TEST_P(mr_ref_test_mt, AllocFreeDifferentThreadsDifferentStream)
{
rmm::cuda_stream streamB;
test_allocate_async_free_different_threads(this->ref, this->stream, streamB);
streamB.synchronize();
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/thrust_allocator_tests.cu
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mr_test.hpp"
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/thrust_allocator_adaptor.hpp>
#include <gtest/gtest.h>
#include <thrust/reduce.h>
// explicit instantiation for test coverage purposes
template class rmm::mr::thrust_allocator<int>;
namespace rmm::test {
namespace {
struct allocator_test : public mr_test {};
using async_resource_ref = cuda::mr::async_resource_ref<cuda::mr::device_accessible>;
TEST_P(allocator_test, first)
{
auto const num_ints{100};
rmm::device_vector<int> ints(num_ints, 1);
EXPECT_EQ(num_ints, thrust::reduce(ints.begin(), ints.end()));
}
TEST_P(allocator_test, defaults)
{
rmm::mr::thrust_allocator<int> allocator(rmm::cuda_stream_default);
EXPECT_EQ(allocator.stream(), rmm::cuda_stream_default);
EXPECT_EQ(allocator.memory_resource(),
async_resource_ref{rmm::mr::get_current_device_resource()});
}
INSTANTIATE_TEST_CASE_P(ThrustAllocatorTests,
allocator_test,
::testing::Values(mr_factory{"CUDA", &make_cuda},
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
mr_factory{"CUDA_Async", &make_cuda_async},
#endif
mr_factory{"Managed", &make_managed},
mr_factory{"Pool", &make_pool},
mr_factory{"Arena", &make_arena},
mr_factory{"Binning", &make_binning}),
[](auto const& info) { return info.param.name; });
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/mr_tests.cpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mr_test.hpp"
#include <rmm/mr/device/per_device_resource.hpp>
#include <gtest/gtest.h>
namespace rmm::test {
namespace {
INSTANTIATE_TEST_SUITE_P(ResourceTests,
mr_test,
::testing::Values(mr_factory{"CUDA", &make_cuda},
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
mr_factory{"CUDA_Async", &make_cuda_async},
#endif
mr_factory{"Managed", &make_managed},
mr_factory{"Pool", &make_pool},
mr_factory{"Arena", &make_arena},
mr_factory{"Binning", &make_binning},
mr_factory{"Fixed_Size", &make_fixed_size}),
[](auto const& info) { return info.param.name; });
// Leave out fixed-size MR here because it can't handle the dynamic allocation sizes
INSTANTIATE_TEST_SUITE_P(ResourceAllocationTests,
mr_allocation_test,
::testing::Values(mr_factory{"CUDA", &make_cuda},
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
mr_factory{"CUDA_Async", &make_cuda_async},
#endif
mr_factory{"Managed", &make_managed},
mr_factory{"Pool", &make_pool},
mr_factory{"Arena", &make_arena},
mr_factory{"Binning", &make_binning}),
[](auto const& info) { return info.param.name; });
TEST(DefaultTest, CurrentDeviceResourceIsCUDA)
{
EXPECT_NE(nullptr, rmm::mr::get_current_device_resource());
EXPECT_TRUE(rmm::mr::get_current_device_resource()->is_equal(rmm::mr::cuda_memory_resource{}));
}
TEST(DefaultTest, UseCurrentDeviceResource) { test_get_current_device_resource(); }
TEST(DefaultTest, GetCurrentDeviceResource)
{
auto* mr = rmm::mr::get_current_device_resource();
EXPECT_NE(nullptr, mr);
EXPECT_TRUE(mr->is_equal(rmm::mr::cuda_memory_resource{}));
}
TEST_P(mr_test, SetCurrentDeviceResource)
{
rmm::mr::device_memory_resource* old{};
old = rmm::mr::set_current_device_resource(this->mr.get());
EXPECT_NE(nullptr, old);
// old mr should equal a cuda mr
EXPECT_TRUE(old->is_equal(rmm::mr::cuda_memory_resource{}));
// current dev resource should equal this resource
EXPECT_TRUE(this->mr->is_equal(*rmm::mr::get_current_device_resource()));
test_get_current_device_resource();
// setting to `nullptr` should reset to initial cuda resource
rmm::mr::set_current_device_resource(nullptr);
EXPECT_TRUE(rmm::mr::get_current_device_resource()->is_equal(rmm::mr::cuda_memory_resource{}));
}
TEST_P(mr_test, SelfEquality) { EXPECT_TRUE(this->mr->is_equal(*this->mr)); }
TEST_P(mr_test, SupportsStreams)
{
if (this->mr->is_equal(rmm::mr::cuda_memory_resource{}) ||
this->mr->is_equal(rmm::mr::managed_memory_resource{})) {
EXPECT_FALSE(this->mr->supports_streams());
} else {
EXPECT_TRUE(this->mr->supports_streams());
}
}
TEST_P(mr_test, GetMemInfo)
{
if (this->mr->supports_get_mem_info()) {
const auto allocation_size{16 * 256};
{
auto const [free, total] = this->mr->get_mem_info(rmm::cuda_stream_view{});
EXPECT_TRUE(free >= allocation_size);
}
void* ptr{nullptr};
ptr = this->mr->allocate(allocation_size);
{
auto const [free, total] = this->mr->get_mem_info(rmm::cuda_stream_view{});
EXPECT_TRUE(free >= allocation_size);
}
this->mr->deallocate(ptr, allocation_size);
} else {
auto const [free, total] = this->mr->get_mem_info(rmm::cuda_stream_view{});
EXPECT_EQ(free, 0);
EXPECT_EQ(total, 0);
}
}
// Simple reproducer for https://github.com/rapidsai/rmm/issues/861
TEST_P(mr_test, AllocationsAreDifferentDefaultStream)
{
concurrent_allocations_are_different(this->mr.get(), cuda_stream_view{});
}
TEST_P(mr_test, AllocationsAreDifferent)
{
concurrent_allocations_are_different(this->mr.get(), this->stream);
}
TEST_P(mr_allocation_test, AllocateDefaultStream)
{
test_various_allocations(this->mr.get(), cuda_stream_view{});
}
TEST_P(mr_allocation_test, AllocateOnStream)
{
test_various_allocations(this->mr.get(), this->stream);
}
TEST_P(mr_allocation_test, RandomAllocations) { test_random_allocations(this->mr.get()); }
TEST_P(mr_allocation_test, RandomAllocationsStream)
{
test_random_allocations(this->mr.get(), default_num_allocations, default_max_size, this->stream);
}
TEST_P(mr_allocation_test, MixedRandomAllocationFree)
{
test_mixed_random_allocation_free(this->mr.get(), default_max_size, cuda_stream_view{});
}
TEST_P(mr_allocation_test, MixedRandomAllocationFreeStream)
{
test_mixed_random_allocation_free(this->mr.get(), default_max_size, this->stream);
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/polymorphic_allocator_tests.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <gtest/gtest.h>
#include <rmm/cuda_stream.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>
namespace {
struct allocator_test : public ::testing::Test {
rmm::cuda_stream stream;
};
TEST_F(allocator_test, default_resource)
{
rmm::mr::polymorphic_allocator<int> allocator{};
EXPECT_EQ(allocator.resource(), rmm::mr::get_current_device_resource());
}
TEST_F(allocator_test, custom_resource)
{
rmm::mr::cuda_memory_resource mr;
rmm::mr::polymorphic_allocator<int> allocator{&mr};
EXPECT_EQ(allocator.resource(), &mr);
}
void test_conversion(rmm::mr::polymorphic_allocator<int> /*unused*/) {}
TEST_F(allocator_test, implicit_conversion)
{
rmm::mr::cuda_memory_resource mr;
test_conversion(&mr);
}
TEST_F(allocator_test, self_equality)
{
rmm::mr::polymorphic_allocator<int> allocator{};
EXPECT_EQ(allocator, allocator);
EXPECT_FALSE(allocator != allocator);
}
TEST_F(allocator_test, equal_resources)
{
rmm::mr::cuda_memory_resource mr0;
rmm::mr::polymorphic_allocator<int> alloc0{&mr0};
rmm::mr::cuda_memory_resource mr1;
rmm::mr::polymorphic_allocator<int> alloc1{&mr1};
EXPECT_EQ(alloc0, alloc1);
EXPECT_FALSE(alloc0 != alloc1);
}
TEST_F(allocator_test, unequal_resources)
{
rmm::mr::managed_memory_resource mr0;
rmm::mr::polymorphic_allocator<int> alloc0{&mr0};
rmm::mr::cuda_memory_resource mr1;
rmm::mr::polymorphic_allocator<int> alloc1{&mr1};
EXPECT_NE(alloc0, alloc1);
}
TEST_F(allocator_test, copy_ctor_same_type)
{
rmm::mr::polymorphic_allocator<int> alloc0;
rmm::mr::polymorphic_allocator<int> alloc1{alloc0};
EXPECT_EQ(alloc0, alloc1);
EXPECT_EQ(alloc0.resource(), alloc1.resource());
}
TEST_F(allocator_test, copy_ctor_different_type)
{
rmm::mr::polymorphic_allocator<int> alloc0;
rmm::mr::polymorphic_allocator<double> alloc1{alloc0};
EXPECT_EQ(alloc0, alloc1);
EXPECT_EQ(alloc0.resource(), alloc1.resource());
}
TEST_F(allocator_test, rebind)
{
using Allocator = rmm::mr::polymorphic_allocator<int>;
Allocator alloc0;
using Rebound = std::allocator_traits<Allocator>::rebind_alloc<double>;
EXPECT_TRUE((std::is_same<std::allocator_traits<Rebound>::value_type, double>::value));
}
TEST_F(allocator_test, allocate_deallocate)
{
rmm::mr::polymorphic_allocator<int> allocator{};
const auto size{1000};
auto* ptr = allocator.allocate(size, stream);
EXPECT_NE(ptr, nullptr);
EXPECT_NO_THROW(allocator.deallocate(ptr, size, stream));
}
} // namespace
| 0 |
rapidsai_public_repos/rmm/tests/mr
|
rapidsai_public_repos/rmm/tests/mr/device/tracking_mr_tests.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../../byte_literals.hpp"
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/logger.hpp>
#include <rmm/mr/device/tracking_resource_adaptor.hpp>
#include <gtest/gtest.h>
#include <spdlog/sinks/ostream_sink.h>
namespace rmm::test {
namespace {
using tracking_adaptor = rmm::mr::tracking_resource_adaptor<rmm::mr::device_memory_resource>;
constexpr auto num_allocations{10};
constexpr auto num_more_allocations{5};
constexpr auto ten_MiB{10_MiB};
TEST(TrackingTest, ThrowOnNullUpstream)
{
auto construct_nullptr = []() { tracking_adaptor mr{nullptr}; };
EXPECT_THROW(construct_nullptr(), rmm::logic_error);
}
TEST(TrackingTest, Empty)
{
tracking_adaptor mr{rmm::mr::get_current_device_resource()};
EXPECT_EQ(mr.get_outstanding_allocations().size(), 0);
EXPECT_EQ(mr.get_allocated_bytes(), 0);
}
TEST(TrackingTest, AllFreed)
{
tracking_adaptor mr{rmm::mr::get_current_device_resource()};
std::vector<void*> allocations;
allocations.reserve(num_allocations);
for (int i = 0; i < num_allocations; ++i) {
allocations.push_back(mr.allocate(ten_MiB));
}
for (auto* alloc : allocations) {
mr.deallocate(alloc, ten_MiB);
}
EXPECT_EQ(mr.get_outstanding_allocations().size(), 0);
EXPECT_EQ(mr.get_allocated_bytes(), 0);
}
TEST(TrackingTest, AllocationsLeftWithStacks)
{
tracking_adaptor mr{rmm::mr::get_current_device_resource(), true};
std::vector<void*> allocations;
allocations.reserve(num_allocations);
for (int i = 0; i < num_allocations; ++i) {
allocations.push_back(mr.allocate(ten_MiB));
}
for (int i = 0; i < num_allocations; i += 2) {
mr.deallocate(allocations[i], ten_MiB);
}
EXPECT_EQ(mr.get_outstanding_allocations().size(), num_allocations / 2);
EXPECT_EQ(mr.get_allocated_bytes(), ten_MiB * (num_allocations / 2));
auto const& outstanding_allocations = mr.get_outstanding_allocations();
EXPECT_EQ(outstanding_allocations.size(), num_allocations / 2);
EXPECT_NE(outstanding_allocations.begin()->second.strace, nullptr);
}
TEST(TrackingTest, AllocationsLeftWithoutStacks)
{
tracking_adaptor mr{rmm::mr::get_current_device_resource()};
std::vector<void*> allocations;
allocations.reserve(num_allocations);
for (int i = 0; i < num_allocations; ++i) {
allocations.push_back(mr.allocate(ten_MiB));
}
for (int i = 0; i < num_allocations; i += 2) {
mr.deallocate(allocations[i], ten_MiB);
}
EXPECT_EQ(mr.get_outstanding_allocations().size(), num_allocations / 2);
EXPECT_EQ(mr.get_allocated_bytes(), ten_MiB * (num_allocations / 2));
auto const& outstanding_allocations = mr.get_outstanding_allocations();
EXPECT_EQ(outstanding_allocations.size(), num_allocations / 2);
EXPECT_EQ(outstanding_allocations.begin()->second.strace, nullptr);
}
TEST(TrackingTest, MultiTracking)
{
tracking_adaptor mr{rmm::mr::get_current_device_resource(), true};
rmm::mr::set_current_device_resource(&mr);
std::vector<std::shared_ptr<rmm::device_buffer>> allocations;
for (std::size_t i = 0; i < num_allocations; ++i) {
allocations.emplace_back(
std::make_shared<rmm::device_buffer>(ten_MiB, rmm::cuda_stream_default));
}
EXPECT_EQ(mr.get_outstanding_allocations().size(), num_allocations);
tracking_adaptor inner_mr{rmm::mr::get_current_device_resource()};
rmm::mr::set_current_device_resource(&inner_mr);
for (std::size_t i = 0; i < num_more_allocations; ++i) {
allocations.emplace_back(
std::make_shared<rmm::device_buffer>(ten_MiB, rmm::cuda_stream_default));
}
// Check the allocated bytes for both MRs
EXPECT_EQ(mr.get_outstanding_allocations().size(), num_allocations + num_more_allocations);
EXPECT_EQ(inner_mr.get_outstanding_allocations().size(), num_more_allocations);
EXPECT_EQ(mr.get_allocated_bytes(), ten_MiB * (num_allocations + num_more_allocations));
EXPECT_EQ(inner_mr.get_allocated_bytes(), ten_MiB * num_more_allocations);
EXPECT_GT(mr.get_outstanding_allocations_str().size(), 0);
// Clear the allocations, causing all memory to be freed
allocations.clear();
// The current allocations for both MRs should be 0
EXPECT_EQ(mr.get_outstanding_allocations().size(), 0);
EXPECT_EQ(inner_mr.get_outstanding_allocations().size(), 0);
EXPECT_EQ(mr.get_allocated_bytes(), 0);
EXPECT_EQ(inner_mr.get_allocated_bytes(), 0);
// Reset the current device resource
rmm::mr::set_current_device_resource(mr.get_upstream());
}
TEST(TrackingTest, NegativeInnerTracking)
{
// This tests the unlikely scenario where pointers are deallocated on an inner
// wrapped memory resource. This can happen if the MR is not saved with the
// memory pointer
tracking_adaptor mr{rmm::mr::get_current_device_resource()};
std::vector<void*> allocations;
for (std::size_t i = 0; i < num_allocations; ++i) {
allocations.push_back(mr.allocate(ten_MiB));
}
EXPECT_EQ(mr.get_outstanding_allocations().size(), num_allocations);
tracking_adaptor inner_mr{&mr};
// Add more allocations
for (std::size_t i = 0; i < num_more_allocations; ++i) {
allocations.push_back(inner_mr.allocate(ten_MiB));
}
// Check the outstanding allocations
EXPECT_EQ(mr.get_outstanding_allocations().size(), num_allocations + num_more_allocations);
EXPECT_EQ(inner_mr.get_outstanding_allocations().size(), num_more_allocations);
// Deallocate all allocations using the inner_mr
for (auto& allocation : allocations) {
inner_mr.deallocate(allocation, ten_MiB);
}
allocations.clear();
// Check the outstanding allocations are all 0
EXPECT_EQ(mr.get_outstanding_allocations().size(), 0);
EXPECT_EQ(inner_mr.get_outstanding_allocations().size(), 0);
}
TEST(TrackingTest, DeallocWrongBytes)
{
tracking_adaptor mr{rmm::mr::get_current_device_resource()};
std::vector<void*> allocations;
for (std::size_t i = 0; i < num_allocations; ++i) {
allocations.push_back(mr.allocate(ten_MiB));
}
// When deallocating, pass the wrong bytes to deallocate
for (auto& allocation : allocations) {
mr.deallocate(allocation, ten_MiB / 2);
}
allocations.clear();
EXPECT_EQ(mr.get_outstanding_allocations().size(), 0);
EXPECT_EQ(mr.get_allocated_bytes(), 0);
// Verify current allocations are correct despite the error
EXPECT_EQ(mr.get_allocated_bytes(), 0);
}
TEST(TrackingTest, LogOutstandingAllocations)
{
std::ostringstream oss;
auto oss_sink = std::make_shared<spdlog::sinks::ostream_sink_st>(oss);
rmm::logger().sinks().push_back(oss_sink);
auto old_level = rmm::logger().level();
tracking_adaptor mr{rmm::mr::get_current_device_resource()};
std::vector<void*> allocations;
for (std::size_t i = 0; i < num_allocations; ++i) {
allocations.push_back(mr.allocate(ten_MiB));
}
rmm::logger().set_level(spdlog::level::debug);
EXPECT_NO_THROW(mr.log_outstanding_allocations());
#if SPDLOG_ACTIVE_LEVEL <= SPDLOG_LEVEL_DEBUG
EXPECT_NE(oss.str().find("Outstanding Allocations"), std::string::npos);
#endif
for (auto& allocation : allocations) {
mr.deallocate(allocation, ten_MiB);
}
rmm::logger().set_level(old_level);
rmm::logger().sinks().pop_back();
}
} // namespace
} // namespace rmm::test
| 0 |
rapidsai_public_repos/rmm/conda
|
rapidsai_public_repos/rmm/conda/environments/all_cuda-120_arch-x86_64.yaml
|
# This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- breathe
- c-compiler
- clang-tools==16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-nvcc
- cuda-python>=12.0,<13.0a0
- cuda-version=12.0
- cxx-compiler
- cython>=3.0.0
- doxygen=1.9.1
- fmt>=9.1.0,<10
- gcc_linux-64=11.*
- gcovr>=5.0
- graphviz
- identify>=2.5.20
- ipython
- make
- nbsphinx
- ninja
- numba>=0.57
- numpy>=1.21
- numpydoc
- pre-commit
- pytest
- pytest-cov
- python>=3.9,<3.11
- scikit-build>=0.13.1
- spdlog>=1.11.0,<1.12
- sphinx
- sphinx-copybutton
- sphinx-markdown-tables
- sphinx_rtd_theme
- sysroot_linux-64==2.17
- tomli
name: all_cuda-120_arch-x86_64
| 0 |
rapidsai_public_repos/rmm/conda
|
rapidsai_public_repos/rmm/conda/environments/all_cuda-118_arch-x86_64.yaml
|
# This file is generated by `rapids-dependency-file-generator`.
# To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`.
channels:
- rapidsai
- conda-forge
dependencies:
- breathe
- c-compiler
- clang-tools==16.0.6
- clang==16.0.6
- cmake>=3.26.4
- cuda-nvcc
- cuda-python>=11.7.1,<12.0a0
- cuda-version=11.8
- cudatoolkit
- cxx-compiler
- cython>=3.0.0
- doxygen=1.9.1
- fmt>=9.1.0,<10
- gcc_linux-64=11.*
- gcovr>=5.0
- graphviz
- identify>=2.5.20
- ipython
- make
- nbsphinx
- ninja
- numba>=0.57
- numpy>=1.21
- numpydoc
- nvcc_linux-64=11.8
- pre-commit
- pytest
- pytest-cov
- python>=3.9,<3.11
- scikit-build>=0.13.1
- spdlog>=1.11.0,<1.12
- sphinx
- sphinx-copybutton
- sphinx-markdown-tables
- sphinx_rtd_theme
- sysroot_linux-64==2.17
- tomli
name: all_cuda-118_arch-x86_64
| 0 |
rapidsai_public_repos/rmm/conda/recipes
|
rapidsai_public_repos/rmm/conda/recipes/librmm/install_librmm_tests.sh
|
#!/bin/bash
cmake --install build --component testing
| 0 |
rapidsai_public_repos/rmm/conda/recipes
|
rapidsai_public_repos/rmm/conda/recipes/librmm/install_librmm.sh
|
#!/bin/bash
cmake --install build
| 0 |
rapidsai_public_repos/rmm/conda/recipes
|
rapidsai_public_repos/rmm/conda/recipes/librmm/conda_build_config.yaml
|
c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
cmake_version:
- ">=3.26.4"
fmt_version:
- ">=9.1.0,<10"
gtest_version:
- ">=1.13.0"
spdlog_version:
- ">=1.11.0,<1.12"
sysroot_version:
- "2.17"
| 0 |
rapidsai_public_repos/rmm/conda/recipes
|
rapidsai_public_repos/rmm/conda/recipes/librmm/build.sh
|
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
./build.sh -n -v clean librmm tests benchmarks --cmake-args=\"-DCMAKE_INSTALL_LIBDIR=lib\"
| 0 |
rapidsai_public_repos/rmm/conda/recipes
|
rapidsai_public_repos/rmm/conda/recipes/librmm/meta.yaml
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set cuda_spec = ">=" + cuda_major ~ ",<" + (cuda_major | int + 1) ~ ".0a0" %} # i.e. >=11,<12.0a0
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: librmm-split
source:
path: ../../..
requirements:
build:
- cmake {{ cmake_version }}
- ninja
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} {{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- cuda-version ={{ cuda_version }}
# We require spdlog and fmt (which was devendored from spdlog
# conda-forge packages in 1.11.0) so that the spdlog headers are not
# pulled by CPM and installed as a part of the rmm packages. However,
# building against librmm still requires these headers. They are also
# added as a run requirement via the packages' run_exports.
- fmt {{ fmt_version }}
- spdlog {{ spdlog_version }}
- gtest {{ gtest_version }}
- gmock {{ gtest_version }}
build:
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- PARALLEL_LEVEL
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=librmm-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=librmm-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
outputs:
- name: librmm
version: {{ version }}
script: install_librmm.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
run_exports:
- {{ pin_subpackage("librmm", max_pin="x.x") }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
run:
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- cuda-version {{ cuda_spec }}
- fmt {{ fmt_version }}
- spdlog {{ spdlog_version }}
test:
commands:
- test -f $PREFIX/include/rmm/thrust_rmm_allocator.h
- test -f $PREFIX/include/rmm/logger.hpp
- test -f $PREFIX/include/rmm/cuda_stream.hpp
- test -f $PREFIX/include/rmm/cuda_stream_view.hpp
- test -f $PREFIX/include/rmm/cuda_stream_pool.hpp
- test -f $PREFIX/include/rmm/device_uvector.hpp
- test -f $PREFIX/include/rmm/device_scalar.hpp
- test -f $PREFIX/include/rmm/device_buffer.hpp
- test -f $PREFIX/include/rmm/detail/aligned.hpp
- test -f $PREFIX/include/rmm/detail/error.hpp
- test -f $PREFIX/include/rmm/detail/exec_check_disable.hpp
- test -f $PREFIX/include/rmm/mr/device/detail/arena.hpp
- test -f $PREFIX/include/rmm/mr/device/detail/free_list.hpp
- test -f $PREFIX/include/rmm/mr/device/detail/coalescing_free_list.hpp
- test -f $PREFIX/include/rmm/mr/device/detail/fixed_size_free_list.hpp
- test -f $PREFIX/include/rmm/mr/device/detail/stream_ordered_memory_resource.hpp
- test -f $PREFIX/include/rmm/mr/device/arena_memory_resource.hpp
- test -f $PREFIX/include/rmm/mr/device/binning_memory_resource.hpp
- test -f $PREFIX/include/rmm/mr/device/cuda_memory_resource.hpp
- test -f $PREFIX/include/rmm/mr/device/device_memory_resource.hpp
- test -f $PREFIX/include/rmm/mr/device/fixed_size_memory_resource.hpp
- test -f $PREFIX/include/rmm/mr/device/limiting_resource_adaptor.hpp
- test -f $PREFIX/include/rmm/mr/device/logging_resource_adaptor.hpp
- test -f $PREFIX/include/rmm/mr/device/managed_memory_resource.hpp
- test -f $PREFIX/include/rmm/mr/device/owning_wrapper.hpp
- test -f $PREFIX/include/rmm/mr/device/per_device_resource.hpp
- test -f $PREFIX/include/rmm/mr/device/pool_memory_resource.hpp
- test -f $PREFIX/include/rmm/mr/device/thread_safe_resource_adaptor.hpp
- test -f $PREFIX/include/rmm/mr/device/thrust_allocator_adaptor.hpp
- test -f $PREFIX/include/rmm/mr/host/host_memory_resource.hpp
- test -f $PREFIX/include/rmm/mr/host/new_delete_resource.hpp
- test -f $PREFIX/include/rmm/mr/host/pinned_memory_resource.hpp
about:
home: https://rapids.ai/
license: Apache-2.0
summary: librmm library
- name: librmm-tests
version: {{ version }}
script: install_librmm_tests.sh
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
{% else %}
- cuda-cudart-dev
{% endif %}
run:
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
- {{ pin_subpackage('librmm', exact=True) }}
- gtest {{ gtest_version }}
- gmock {{ gtest_version }}
about:
home: https://rapids.ai/
license: Apache-2.0
summary: librmm test & benchmark executables
| 0 |
rapidsai_public_repos/rmm/conda/recipes
|
rapidsai_public_repos/rmm/conda/recipes/rmm/conda_build_config.yaml
|
c_compiler_version:
- 11
cxx_compiler_version:
- 11
cuda_compiler:
- cuda-nvcc
cuda11_compiler:
- nvcc
sysroot_version:
- "2.17"
cmake_version:
- ">=3.26.4"
| 0 |
rapidsai_public_repos/rmm/conda/recipes
|
rapidsai_public_repos/rmm/conda/recipes/rmm/build.sh
|
# Copyright (c) 2018-2019, NVIDIA CORPORATION.
# Script assumes the script is executed from the root of the repo directory
./build.sh -v clean rmm --cmake-args=\"-DCMAKE_INSTALL_LIBDIR=lib\"
| 0 |
rapidsai_public_repos/rmm/conda/recipes
|
rapidsai_public_repos/rmm/conda/recipes/rmm/meta.yaml
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set cuda_version = '.'.join(environ['RAPIDS_CUDA_VERSION'].split('.')[:2]) %}
{% set cuda_major = cuda_version.split('.')[0] %}
{% set py_version = environ['CONDA_PY'] %}
{% set date_string = environ['RAPIDS_DATE_STRING'] %}
package:
name: rmm
version: {{ version }}
source:
path: ../../..
build:
number: {{ GIT_DESCRIBE_NUMBER }}
string: cuda{{ cuda_major }}_py{{ py_version }}_{{ date_string }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }}
script_env:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- AWS_SESSION_TOKEN
- CMAKE_C_COMPILER_LAUNCHER
- CMAKE_CUDA_COMPILER_LAUNCHER
- CMAKE_CXX_COMPILER_LAUNCHER
- CMAKE_GENERATOR
- SCCACHE_BUCKET
- SCCACHE_IDLE_TIMEOUT
- SCCACHE_REGION
- SCCACHE_S3_KEY_PREFIX=rmm-aarch64 # [aarch64]
- SCCACHE_S3_KEY_PREFIX=rmm-linux64 # [linux64]
- SCCACHE_S3_USE_SSL
- SCCACHE_S3_NO_CREDENTIALS
ignore_run_exports_from:
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }}
{% endif %}
requirements:
build:
- cmake {{ cmake_version }}
- ninja
- {{ compiler('c') }}
- {{ compiler('cxx') }}
{% if cuda_major == "11" %}
- {{ compiler('cuda11') }} {{ cuda_version }}
{% else %}
- {{ compiler('cuda') }}
{% endif %}
- cuda-version ={{ cuda_version }}
- sysroot_{{ target_platform }} {{ sysroot_version }}
host:
- cuda-version ={{ cuda_version }}
{% if cuda_major == "11" %}
- cudatoolkit
- cuda-python ==11.7.1
{% else %}
- cuda-cudart-dev
- cuda-python ==12.0.0
{% endif %}
- cython >=3.0.0
- librmm ={{ version }}
- python
- scikit-build >=0.13.1
- setuptools >=61.0.0
- tomli # [py<311]
run:
{% if cuda_major == "11" %}
- cudatoolkit
{% endif %}
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}
- numba >=0.57
- numpy >=1.21
test:
imports:
- rmm
about:
home: https://rapids.ai/
license: Apache-2.0
license_family: Apache
license_file: LICENSE
summary: rmm library
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/scripts/gdb-pretty-printers.py
|
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gdb
class HostIterator:
"""Iterates over arrays in host memory."""
def __init__(self, start, size):
self.item = start
self.size = size
self.count = 0
def __iter__(self):
return self
def __next__(self):
if self.count >= self.size:
raise StopIteration
elt = self.item.dereference()
count = self.count
self.item += 1
self.count += 1
return (f"[{count}]", elt)
class DeviceIterator:
"""Iterates over device arrays by copying chunks to the host."""
def __init__(self, start, size):
self.exec = exec
self.item = start
self.size = size
self.count = 0
self.buffer = None
self.sizeof = self.item.dereference().type.sizeof
self.buffer_start = 0
# At most 1 MB or size, at least 1
self.buffer_size = min(size, max(1, 2**20 // self.sizeof))
self.buffer = gdb.parse_and_eval(
f"(void*)malloc({self.buffer_size * self.sizeof})"
)
self.buffer.fetch_lazy()
self.buffer_count = self.buffer_size
self.update_buffer()
def update_buffer(self):
if self.buffer_count >= self.buffer_size:
self.buffer_item = gdb.parse_and_eval(hex(self.buffer)).cast(
self.item.type
)
self.buffer_count = 0
self.buffer_start = self.count
device_addr = hex(self.item.dereference().address)
buffer_addr = hex(self.buffer)
size = (
min(self.buffer_size, self.size - self.buffer_start)
* self.sizeof
)
status = gdb.parse_and_eval(
f"(cudaError)cudaMemcpy({buffer_addr}, {device_addr}, {size}, "
"cudaMemcpyDeviceToHost)"
)
if status != 0:
raise gdb.MemoryError(f"memcpy from device failed: {status}")
def __del__(self):
gdb.parse_and_eval(f"(void)free({hex(self.buffer)})").fetch_lazy()
def __iter__(self):
return self
def __next__(self):
if self.count >= self.size:
raise StopIteration
self.update_buffer()
elt = self.buffer_item.dereference()
self.buffer_item += 1
self.buffer_count += 1
count = self.count
self.item += 1
self.count += 1
return (f"[{count}]", elt)
class RmmDeviceUVectorPrinter(gdb.printing.PrettyPrinter):
"""Print a rmm::device_uvector."""
def __init__(self, val):
self.val = val
el_type = val.type.template_argument(0)
self.pointer = val["_storage"]["_data"].cast(el_type.pointer())
self.size = int(val["_storage"]["_size"]) // el_type.sizeof
self.capacity = int(val["_storage"]["_capacity"]) // el_type.sizeof
def children(self):
return DeviceIterator(self.pointer, self.size)
def to_string(self):
return (
f"{self.val.type} of length {self.size}, capacity {self.capacity}"
)
def display_hint(self):
return "array"
# Workaround to avoid using the pretty printer on things like
# std::vector<int>::iterator
def is_template_type_not_alias(typename):
loc = typename.find("<")
if loc is None:
return False
depth = 0
for char in typename[loc:-1]:
if char == "<":
depth += 1
if char == ">":
depth -= 1
if depth == 0:
return False
return True
def template_match(typename, template_name):
return typename.startswith(template_name + "<") and typename.endswith(">")
def lookup_rmm_type(val):
if not str(val.type.unqualified()).startswith("rmm::"):
return None
suffix = str(val.type.unqualified())[5:]
if not is_template_type_not_alias(suffix):
return None
if template_match(suffix, "device_uvector"):
return RmmDeviceUVectorPrinter(val)
return None
gdb.pretty_printers.append(lookup_rmm_type)
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/scripts/doxygen.sh
|
#!/bin/bash
# Copyright (c) 2023, NVIDIA CORPORATION.
##############################
# RMM doxygen warnings check #
##############################
# skip if doxygen is not installed
if ! [ -x "$(command -v doxygen)" ]; then
echo -e "warning: doxygen is not installed"
exit 0
fi
# Utility to return version as number for comparison
function version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; }
# doxygen supported version 1.9.1
DOXYGEN_VERSION=`doxygen --version`
if [ ! $(version "$DOXYGEN_VERSION") -eq $(version "1.9.1") ] ; then
echo -e "warning: Unsupported doxygen version $DOXYGEN_VERSION"
echo -e "Expecting doxygen version 1.9.1"
exit 0
fi
# Run doxygen, ignore missing tag files error
TAG_ERROR1="error: Tag file '.*.tag' does not exist or is not a file. Skipping it..."
TAG_ERROR2="error: cannot open tag file .*.tag for writing"
DOXYGEN_STDERR=`cd doxygen && { cat Doxyfile ; echo QUIET = YES; echo GENERATE_HTML = NO; } | doxygen - 2>&1 | sed "/\($TAG_ERROR1\|$TAG_ERROR2\)/d"`
RETVAL=$?
if [ "$RETVAL" != "0" ] || [ ! -z "$DOXYGEN_STDERR" ]; then
echo -e "$DOXYGEN_STDERR"
RETVAL=1 #because return value is not generated by doxygen 1.8.20
fi
exit $RETVAL
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/scripts/load-pretty-printers.in
|
source @Thrust_SOURCE_DIR@/scripts/gdb-pretty-printers.py
source @PROJECT_SOURCE_DIR@/scripts/gdb-pretty-printers.py
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/scripts/run-cmake-format.sh
|
#!/bin/bash
# This script is a wrapper for cmakelang that may be used with pre-commit. The
# wrapping is necessary because RAPIDS libraries split configuration for
# cmakelang linters between a local config file and a second config file that's
# shared across all of RAPIDS via rapids-cmake. In order to keep it up to date
# this file is only maintained in one place (the rapids-cmake repo) and
# pulled down during builds. We need a way to invoke CMake linting commands
# without causing pre-commit failures (which could block local commits or CI),
# while also being sufficiently flexible to allow users to maintain the config
# file independently of a build directory.
#
# This script provides the minimal functionality to enable those use cases. It
# searches in a number of predefined locations for the rapids-cmake config file
# and exits gracefully if the file is not found. If a user wishes to specify a
# config file at a nonstandard location, they may do so by setting the
# environment variable RAPIDS_CMAKE_FORMAT_FILE.
#
# This script can be invoked directly anywhere within the project repository.
# Alternatively, it may be invoked as a pre-commit hook via
# `pre-commit run (cmake-format)|(cmake-lint)`.
#
# Usage:
# bash run-cmake-format.sh {cmake-format,cmake-lint} infile [infile ...]
status=0
if [ -z ${RMM_ROOT:+PLACEHOLDER} ]; then
RMM_BUILD_DIR=$(git rev-parse --show-toplevel 2>&1)/build
status=$?
else
RMM_BUILD_DIR=${RMM_ROOT}
fi
if ! [ ${status} -eq 0 ]; then
if [[ ${RMM_BUILD_DIR} == *"not a git repository"* ]]; then
echo "This script must be run inside the rmm repository, or the RMM_ROOT environment variable must be set."
else
echo "Script failed with unknown error attempting to determine project root:"
echo ${RMM_BUILD_DIR}
fi
exit 1
fi
DEFAULT_FORMAT_FILE_LOCATIONS=(
"${RMM_BUILD_DIR}/_deps/rapids-cmake-src/cmake-format-rapids-cmake.json"
)
if [ -z ${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER} ]; then
for file_path in ${DEFAULT_FORMAT_FILE_LOCATIONS[@]}; do
if [ -f ${file_path} ]; then
RAPIDS_CMAKE_FORMAT_FILE=${file_path}
break
fi
done
fi
if [ -z ${RAPIDS_CMAKE_FORMAT_FILE:+PLACEHOLDER} ]; then
echo "The rapids-cmake cmake-format configuration file was not found at any of the default search locations: "
echo ""
( IFS=$'\n'; echo "${DEFAULT_FORMAT_FILE_LOCATIONS[*]}" )
echo ""
echo "Try setting the environment variable RAPIDS_CMAKE_FORMAT_FILE to the path to the config file."
exit 0
else
echo "Using format file ${RAPIDS_CMAKE_FORMAT_FILE}"
fi
if [[ $1 == "cmake-format" ]]; then
cmake-format -i --config-files cmake/config.json ${RAPIDS_CMAKE_FORMAT_FILE} -- ${@:2}
elif [[ $1 == "cmake-lint" ]]; then
cmake-lint --config-files cmake/config.json ${RAPIDS_CMAKE_FORMAT_FILE} -- ${@:2}
fi
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/benchmarks/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Build options
option(DISABLE_DEPRECATION_WARNING "Disable warnings generated from deprecated declarations." OFF)
option(PER_THREAD_DEFAULT_STREAM "Build with per-thread default stream" OFF)
if(PER_THREAD_DEFAULT_STREAM)
message(STATUS "RMM: Building benchmarks with per-thread default stream")
endif()
# compiler function
# This function takes in a benchmark name and benchmark source and handles setting all of the
# associated properties and linking to build the benchmark
function(ConfigureBench BENCH_NAME)
add_executable(${BENCH_NAME} ${ARGN}
"${CMAKE_CURRENT_SOURCE_DIR}/synchronization/synchronization.cpp")
target_include_directories(${BENCH_NAME} PRIVATE "$<BUILD_INTERFACE:${RMM_SOURCE_DIR}>")
set_target_properties(
${BENCH_NAME}
PROPERTIES POSITION_INDEPENDENT_CODE ON
RUNTIME_OUTPUT_DIRECTORY "$<BUILD_INTERFACE:${RMM_BINARY_DIR}/gbenchmarks>"
CUDA_ARCHITECTURES "${CMAKE_CUDA_ARCHITECTURES}"
INSTALL_RPATH "\$ORIGIN/../../../lib")
target_link_libraries(${BENCH_NAME} benchmark::benchmark pthread rmm)
target_compile_definitions(${BENCH_NAME}
PUBLIC "SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_${RMM_LOGGING_LEVEL}")
if(PER_THREAD_DEFAULT_STREAM)
target_compile_definitions(${BENCH_NAME} PUBLIC CUDA_API_PER_THREAD_DEFAULT_STREAM)
endif()
target_compile_options(
${BENCH_NAME} PUBLIC $<$<COMPILE_LANG_AND_ID:CXX,GNU,Clang>:-Wall -Werror
-Wno-error=deprecated-declarations -Wno-unknown-pragmas>)
if(DISABLE_DEPRECATION_WARNING)
target_compile_options(
${BENCH_NAME} PUBLIC $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=-Wno-deprecated-declarations>)
target_compile_options(${BENCH_NAME}
PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-Wno-deprecated-declarations>)
endif()
install(
TARGETS ${BENCH_NAME}
COMPONENT testing
DESTINATION bin/benchmarks/librmm
EXCLUDE_FROM_ALL)
endfunction(ConfigureBench)
# random allocations benchmark
ConfigureBench(RANDOM_ALLOCATIONS_BENCH random_allocations/random_allocations.cpp)
# replay benchmark
ConfigureBench(REPLAY_BENCH replay/replay.cpp)
# uvector benchmark
ConfigureBench(UVECTOR_BENCH device_uvector/device_uvector_bench.cu)
# cuda_stream_pool benchmark
ConfigureBench(CUDA_STREAM_POOL_BENCH cuda_stream_pool/cuda_stream_pool_bench.cpp)
# multi stream allocations
ConfigureBench(MULTI_STREAM_ALLOCATIONS_BENCH
multi_stream_allocations/multi_stream_allocations_bench.cu)
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/replay/replay.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex ess or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/utilities/cxxopts.hpp>
#include <benchmarks/utilities/log_parser.hpp>
#include <benchmarks/utilities/simulated_memory_resource.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/binning_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <benchmark/benchmark.h>
#include <spdlog/common.h>
#include <atomic>
#include <chrono>
#include <iterator>
#include <memory>
#include <numeric>
#include <string>
#include <thread>
/// MR factory functions
std::shared_ptr<rmm::mr::device_memory_resource> make_cuda(std::size_t = 0)
{
return std::make_shared<rmm::mr::cuda_memory_resource>();
}
std::shared_ptr<rmm::mr::device_memory_resource> make_simulated(std::size_t simulated_size)
{
return std::make_shared<rmm::mr::simulated_memory_resource>(simulated_size);
}
inline auto make_pool(std::size_t simulated_size)
{
if (simulated_size > 0) {
return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(
make_simulated(simulated_size), simulated_size, simulated_size);
}
return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda());
}
inline auto make_arena(std::size_t simulated_size)
{
if (simulated_size > 0) {
return rmm::mr::make_owning_wrapper<rmm::mr::arena_memory_resource>(
make_simulated(simulated_size), simulated_size, simulated_size);
}
return rmm::mr::make_owning_wrapper<rmm::mr::arena_memory_resource>(make_cuda());
}
inline auto make_binning(std::size_t simulated_size)
{
auto pool = make_pool(simulated_size);
auto mr = rmm::mr::make_owning_wrapper<rmm::mr::binning_memory_resource>(pool);
const auto min_size_exp{18};
const auto max_size_exp{22};
for (std::size_t i = min_size_exp; i <= max_size_exp; i++) {
mr->wrapped().add_bin(1 << i);
}
return mr;
}
using MRFactoryFunc = std::function<std::shared_ptr<rmm::mr::device_memory_resource>(std::size_t)>;
/**
* @brief Represents an allocation made during the replay
*
*/
struct allocation {
allocation() = default;
void* ptr{};
allocation(void* ptr, std::size_t size) : ptr{ptr}, size{size} {}
std::size_t size{};
};
/**
* @brief Function object for running a replay benchmark with the specified
* `device_memory_resource`.
*
* @tparam MR The type of the `device_memory_resource` to use for allocation
* replay
*/
struct replay_benchmark {
MRFactoryFunc factory_;
std::size_t simulated_size_;
std::shared_ptr<rmm::mr::device_memory_resource> mr_{};
std::vector<std::vector<rmm::detail::event>> const& events_{};
// Maps a pointer from the event log to an active allocation
std::unordered_map<uintptr_t, allocation> allocation_map;
std::condition_variable cv; // to ensure in-order playback
std::mutex event_mutex; // to make event_index and allocation_map thread-safe
std::size_t event_index{0}; // playback index
/**
* @brief Construct a `replay_benchmark` from a list of events and
* set of arguments forwarded to the MR constructor.
*
* @param factory A factory function to create the memory resource
* @param events The set of allocation events to replay
* @param args Variable number of arguments forward to the constructor of MR
*/
replay_benchmark(MRFactoryFunc factory,
std::size_t simulated_size,
std::vector<std::vector<rmm::detail::event>> const& events)
: factory_{std::move(factory)},
simulated_size_{simulated_size},
events_{events},
allocation_map{events.size()}
{
}
/**
* @brief Move construct a replay_benchmark (needed by RegisterBenchmark)
*
* Does not copy the mutex or the map
*/
replay_benchmark(replay_benchmark&& other) noexcept
: factory_{std::move(other.factory_)},
simulated_size_{other.simulated_size_},
mr_{std::move(other.mr_)},
events_{other.events_},
allocation_map{std::move(other.allocation_map)}
{
}
~replay_benchmark() = default;
replay_benchmark(replay_benchmark const&) = delete;
replay_benchmark& operator=(replay_benchmark const&) = delete;
replay_benchmark& operator=(replay_benchmark&& other) noexcept = delete;
/// Add an allocation to the map (NOT thread safe)
void set_allocation(uintptr_t ptr, allocation alloc) { allocation_map.insert({ptr, alloc}); }
/// Remove an allocation from the map (NOT thread safe)
allocation remove_allocation(uintptr_t ptr)
{
auto iter = allocation_map.find(ptr);
if (iter != allocation_map.end()) {
allocation alloc = iter->second;
allocation_map.erase(iter);
return alloc;
}
return allocation{};
}
/// Create the memory resource shared by all threads before the benchmark runs
void SetUp(const ::benchmark::State& state)
{
if (state.thread_index() == 0) {
rmm::logger().log(spdlog::level::info, "------ Start of Benchmark -----");
mr_ = factory_(simulated_size_);
}
}
/// Destroy the memory resource and count any unallocated memory
void TearDown(const ::benchmark::State& state)
{
if (state.thread_index() == 0) {
rmm::logger().log(spdlog::level::info, "------ End of Benchmark -----");
// clean up any leaked allocations
std::size_t total_leaked{0};
std::size_t num_leaked{0};
for (auto const& ptr_alloc : allocation_map) {
auto alloc = ptr_alloc.second;
num_leaked++;
total_leaked += alloc.size;
mr_->deallocate(alloc.ptr, alloc.size);
}
if (num_leaked > 0) {
std::cout << "LOG shows leak of " << num_leaked << " allocations of " << total_leaked
<< " total bytes\n";
}
allocation_map.clear();
mr_.reset();
}
}
/// Run the replay benchmark
void operator()(::benchmark::State& state)
{
SetUp(state);
auto const& my_events = events_.at(state.thread_index());
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
std::for_each(my_events.begin(), my_events.end(), [this](auto event) {
// ensure correct ordering between threads
std::unique_lock<std::mutex> lock{event_mutex};
if (event_index != event.index) {
cv.wait(lock, [&]() { return event_index == event.index; });
}
// rmm::detail::action::ALLOCATE_FAILURE is ignored.
if (rmm::detail::action::ALLOCATE == event.act) {
auto ptr = mr_->allocate(event.size);
set_allocation(event.pointer, allocation{ptr, event.size});
} else if (rmm::detail::action::FREE == event.act) {
auto alloc = remove_allocation(event.pointer);
mr_->deallocate(alloc.ptr, event.size);
}
event_index++;
cv.notify_all();
});
}
TearDown(state);
}
};
/**
* @brief Processes a log file into a set of per-thread vectors of events
*
* @param filename Name of log file
* @return A vector of events for each thread in the log
*/
std::vector<std::vector<rmm::detail::event>> parse_per_thread_events(std::string const& filename)
{
using rmm::detail::event;
std::vector<event> all_events = rmm::detail::parse_csv(filename);
RMM_EXPECTS(std::all_of(all_events.begin(),
all_events.end(),
[](auto const& event) {
cudaStream_t custream;
memcpy(&custream, &event.stream, sizeof(cudaStream_t));
auto stream = rmm::cuda_stream_view{custream};
return stream.is_default() or stream.is_per_thread_default();
}),
"Non-default streams not currently supported.");
// Sort events by thread id
std::stable_sort(all_events.begin(), all_events.end(), [](auto lhs, auto rhs) {
return lhs.thread_id < rhs.thread_id;
});
// Count the number of events per thread
std::vector<std::size_t> events_per_thread{};
thrust::reduce_by_key(
thrust::host,
all_events.begin(),
all_events.end(),
thrust::make_constant_iterator(1),
thrust::make_discard_iterator(),
std::back_inserter(events_per_thread),
[](event const& lhs, event const& rhs) { return lhs.thread_id == rhs.thread_id; });
auto const num_threads = events_per_thread.size();
// Copy each thread's events into its own vector
std::vector<std::vector<event>> per_thread_events(num_threads);
std::transform(events_per_thread.begin(),
events_per_thread.end(),
per_thread_events.begin(),
[&all_events, offset = 0](auto num_events) mutable {
auto begin = offset;
offset += num_events;
auto end = offset;
std::vector<event> thread_events(all_events.cbegin() + begin,
all_events.cbegin() + end);
// sort into original order
std::sort(thread_events.begin(), thread_events.end(), [](auto lhs, auto rhs) {
return lhs.index < rhs.index;
});
return thread_events;
});
return per_thread_events;
}
void declare_benchmark(std::string const& name,
std::size_t simulated_size,
std::vector<std::vector<rmm::detail::event>> const& per_thread_events,
std::size_t num_threads)
{
if (name == "cuda") {
benchmark::RegisterBenchmark("CUDA Resource",
replay_benchmark(&make_cuda, simulated_size, per_thread_events))
->Unit(benchmark::kMillisecond)
->Threads(static_cast<int>(num_threads));
} else if (name == "binning") {
benchmark::RegisterBenchmark("Binning Resource",
replay_benchmark(&make_binning, simulated_size, per_thread_events))
->Unit(benchmark::kMillisecond)
->Threads(static_cast<int>(num_threads));
} else if (name == "pool") {
benchmark::RegisterBenchmark("Pool Resource",
replay_benchmark(&make_pool, simulated_size, per_thread_events))
->Unit(benchmark::kMillisecond)
->Threads(static_cast<int>(num_threads));
} else if (name == "arena") {
benchmark::RegisterBenchmark("Arena Resource",
replay_benchmark(&make_arena, simulated_size, per_thread_events))
->Unit(benchmark::kMillisecond)
->Threads(static_cast<int>(num_threads));
} else {
std::cout << "Error: invalid memory_resource name: " << name << "\n";
}
}
// Usage: REPLAY_BENCHMARK -f "path/to/log/file"
int main(int argc, char** argv)
{
try {
// benchmark::Initialize will remove GBench command line arguments it
// recognizes and leave any remaining arguments
::benchmark::Initialize(&argc, argv);
// Parse for replay arguments:
auto args = [&argc, &argv]() {
cxxopts::Options options(
"RMM Replay Benchmark",
"Replays and benchmarks allocation activity captured from RMM logging.");
options.add_options()("f,file", "Name of RMM log file.", cxxopts::value<std::string>());
options.add_options()("r,resource",
"Type of device_memory_resource",
cxxopts::value<std::string>()->default_value("pool"));
options.add_options()(
"s,size",
"Size of simulated GPU memory in GiB. Not supported for the cuda memory "
"resource.",
cxxopts::value<float>()->default_value("0"));
options.add_options()("v,verbose",
"Enable verbose printing of log events",
cxxopts::value<bool>()->default_value("false"));
auto args = options.parse(argc, argv);
if (args.count("file") == 0) {
std::cout << options.help() << std::endl;
exit(0);
}
return args;
}();
auto filename = args["file"].as<std::string>();
auto per_thread_events = [filename]() {
try {
auto events = parse_per_thread_events(filename);
return events;
} catch (std::exception const& e) {
std::cout << "Failed to parse events: " << e.what() << std::endl;
return std::vector<std::vector<rmm::detail::event>>{};
}
}();
#ifdef CUDA_API_PER_THREAD_DEFAULT_STREAM
std::cout << "Using CUDA per-thread default stream.\n";
#endif
auto const simulated_size =
static_cast<std::size_t>(args["size"].as<float>() * static_cast<float>(1U << 30U));
if (simulated_size != 0 && args["resource"].as<std::string>() != "cuda") {
std::cout << "Simulating GPU with memory size of " << simulated_size << " bytes.\n";
}
std::cout << "Total Events: "
<< std::accumulate(
per_thread_events.begin(),
per_thread_events.end(),
0,
[](std::size_t accum, auto const& events) { return accum + events.size(); })
<< std::endl;
for (std::size_t thread = 0; thread < per_thread_events.size(); ++thread) {
std::cout << "Thread " << thread << ": " << per_thread_events[thread].size() << " events\n";
if (args["verbose"].as<bool>()) {
for (auto const& event : per_thread_events[thread]) {
std::cout << event << std::endl;
}
}
}
auto const num_threads = per_thread_events.size();
// Uncomment to enable / change default log level
// rmm::logger().set_level(spdlog::level::trace);
if (args.count("resource") > 0) {
std::string mr_name = args["resource"].as<std::string>();
declare_benchmark(mr_name, simulated_size, per_thread_events, num_threads);
} else {
std::array<std::string, 4> mrs{"pool", "arena", "binning", "cuda"};
std::for_each(std::cbegin(mrs),
std::cend(mrs),
[&simulated_size, &per_thread_events, &num_threads](auto const& mr) {
declare_benchmark(mr, simulated_size, per_thread_events, num_threads);
});
}
::benchmark::RunSpecifiedBenchmarks();
} catch (std::exception const& e) {
std::cout << "Exception caught: " << e.what() << std::endl;
}
return 0;
}
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/random_allocations/random_allocations.cpp
|
/*
* Copyright (c) 2019-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex ess or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/utilities/cxxopts.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/binning_memory_resource.hpp>
#include <rmm/mr/device/cuda_async_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <benchmark/benchmark.h>
#include <array>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#define VERBOSE 0
namespace {
constexpr std::size_t size_mb{1 << 20};
struct allocation {
void* ptr{nullptr};
std::size_t size{0};
allocation(void* ptr, std::size_t size) : ptr{ptr}, size{size} {}
allocation() = default;
};
using allocation_vector = std::vector<allocation>;
allocation remove_at(allocation_vector& allocs, std::size_t index)
{
assert(index < allocs.size());
auto removed = allocs[index];
if ((allocs.size() > 1) && (index < allocs.size() - 1)) {
std::swap(allocs[index], allocs.back());
}
allocs.pop_back();
return removed;
}
template <typename SizeDistribution>
void random_allocation_free(rmm::mr::device_memory_resource& mr,
SizeDistribution size_distribution,
std::size_t num_allocations,
std::size_t max_usage, // in MiB
rmm::cuda_stream_view stream = {})
{
std::default_random_engine generator;
max_usage *= size_mb; // convert to bytes
constexpr int allocation_probability{73}; // percent
constexpr int max_op_chance{99};
std::uniform_int_distribution<int> op_distribution(0, max_op_chance);
std::uniform_int_distribution<int> index_distribution(0, static_cast<int>(num_allocations) - 1);
int active_allocations{0};
std::size_t allocation_count{0};
allocation_vector allocations{};
std::size_t allocation_size{0};
for (std::size_t i = 0; i < num_allocations * 2; ++i) {
bool do_alloc = true;
auto size = static_cast<std::size_t>(size_distribution(generator));
if (active_allocations > 0) {
int chance = op_distribution(generator);
do_alloc = (chance < allocation_probability) && (allocation_count < num_allocations) &&
(allocation_size + size < max_usage);
}
void* ptr = nullptr;
if (do_alloc) { // try to allocate
try {
ptr = mr.allocate(size, stream);
} catch (rmm::bad_alloc const&) {
do_alloc = false;
#if VERBOSE
std::cout << "FAILED to allocate " << size << "\n";
#endif
}
}
if (do_alloc) { // alloc succeeded
allocations.emplace_back(ptr, size);
active_allocations++;
allocation_count++;
allocation_size += size;
#if VERBOSE
std::cout << active_allocations << " | " << allocation_count << " Allocating: " << size
<< " | total: " << allocation_size << "\n";
#endif
} else { // dealloc, or alloc failed
if (active_allocations > 0) {
std::size_t index = index_distribution(generator) % active_allocations;
active_allocations--;
allocation to_free = remove_at(allocations, index);
mr.deallocate(to_free.ptr, to_free.size, stream);
allocation_size -= to_free.size;
#if VERBOSE
std::cout << active_allocations << " | " << allocation_count
<< " Deallocating: " << to_free.size << " at " << index
<< " | total: " << allocation_size << "\n";
#endif
}
}
}
// std::cout << "TOTAL ALLOCATIONS: " << allocation_count << "\n";
assert(active_allocations == 0);
assert(allocations.size() == 0);
}
} // namespace
void uniform_random_allocations(
rmm::mr::device_memory_resource& mr,
std::size_t num_allocations, // NOLINT(bugprone-easily-swappable-parameters)
std::size_t max_allocation_size, // size in MiB
std::size_t max_usage,
rmm::cuda_stream_view stream = {})
{
std::uniform_int_distribution<std::size_t> size_distribution(1, max_allocation_size * size_mb);
random_allocation_free(mr, size_distribution, num_allocations, max_usage, stream);
}
// TODO figure out how to map a normal distribution to integers between 1 and max_allocation_size
/*void normal_random_allocations(rmm::mr::device_memory_resource& mr,
std::size_t num_allocations = 1000,
std::size_t mean_allocation_size = 500, // in MiB
std::size_t stddev_allocation_size = 500, // in MiB
std::size_t max_usage = 8 << 20,
cuda_stream_view stream) {
std::normal_distribution<std::size_t> size_distribution(, max_allocation_size * size_mb);
}*/
/// MR factory functions
inline auto make_cuda() { return std::make_shared<rmm::mr::cuda_memory_resource>(); }
inline auto make_cuda_async() { return std::make_shared<rmm::mr::cuda_async_memory_resource>(); }
inline auto make_pool()
{
return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda());
}
inline auto make_arena()
{
auto free = rmm::detail::available_device_memory().first;
constexpr auto reserve{64UL << 20}; // Leave some space for CUDA overhead.
return rmm::mr::make_owning_wrapper<rmm::mr::arena_memory_resource>(make_cuda(), free - reserve);
}
inline auto make_binning()
{
auto pool = make_pool();
// Add a binning_memory_resource with fixed-size bins of sizes 256, 512, 1024, 2048 and 4096KiB
// Larger allocations will use the pool resource
constexpr auto min_bin_pow2{18};
constexpr auto max_bin_pow2{22};
auto mr = rmm::mr::make_owning_wrapper<rmm::mr::binning_memory_resource>(
pool, min_bin_pow2, max_bin_pow2);
return mr;
}
using MRFactoryFunc = std::function<std::shared_ptr<rmm::mr::device_memory_resource>()>;
constexpr std::size_t max_usage = 16000;
static void BM_RandomAllocations(benchmark::State& state, MRFactoryFunc const& factory)
{
auto mr = factory();
std::size_t num_allocations = state.range(0);
std::size_t max_size = state.range(1);
try {
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
uniform_random_allocations(*mr, num_allocations, max_size, max_usage);
}
} catch (std::exception const& e) {
std::cout << "Error: " << e.what() << "\n";
}
}
static void num_range(benchmark::internal::Benchmark* bench, int size)
{
for (int num_allocations : std::vector<int>{1000, 10000, 100000}) {
bench->Args({num_allocations, size})->Unit(benchmark::kMillisecond);
}
}
static void size_range(benchmark::internal::Benchmark* bench, int num)
{
for (int max_size : std::vector<int>{1, 4, 64, 256, 1024, 4096}) {
bench->Args({num, max_size})->Unit(benchmark::kMillisecond);
}
}
static void num_size_range(benchmark::internal::Benchmark* bench)
{
for (int num_allocations : std::vector<int>{1000, 10000, 100000}) {
size_range(bench, num_allocations);
}
}
int num_allocations = -1; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
int max_size = -1; // NOLINT(cppcoreguidelines-avoid-non-const-global-variables)
void benchmark_range(benchmark::internal::Benchmark* bench)
{
if (num_allocations > 0) {
if (max_size > 0) {
bench->Args({num_allocations, max_size})->Unit(benchmark::kMillisecond);
} else {
size_range(bench, num_allocations);
}
} else {
if (max_size > 0) {
num_range(bench, max_size);
} else {
num_size_range(bench);
}
}
}
void declare_benchmark(std::string const& name)
{
if (name == "cuda") {
BENCHMARK_CAPTURE(BM_RandomAllocations, cuda_mr, &make_cuda) // NOLINT
->Apply(benchmark_range);
}
if (name == "cuda_async") {
BENCHMARK_CAPTURE(BM_RandomAllocations, cuda_async_mr, &make_cuda_async) // NOLINT
->Apply(benchmark_range);
} else if (name == "binning") {
BENCHMARK_CAPTURE(BM_RandomAllocations, binning_mr, &make_binning) // NOLINT
->Apply(benchmark_range);
} else if (name == "pool") {
BENCHMARK_CAPTURE(BM_RandomAllocations, pool_mr, &make_pool) // NOLINT
->Apply(benchmark_range);
} else if (name == "arena") {
BENCHMARK_CAPTURE(BM_RandomAllocations, arena_mr, &make_arena) // NOLINT
->Apply(benchmark_range);
} else {
std::cout << "Error: invalid memory_resource name: " << name << "\n";
}
}
static void profile_random_allocations(MRFactoryFunc const& factory,
std::size_t num_allocations,
std::size_t max_size)
{
auto mr = factory();
try {
uniform_random_allocations(*mr, num_allocations, max_size, max_usage);
} catch (std::exception const& e) {
std::cout << "Error: " << e.what() << "\n";
}
}
int main(int argc, char** argv)
{
try {
// benchmark::Initialize will remove GBench command line arguments it
// recognizes and leave any remaining arguments
::benchmark::Initialize(&argc, argv);
// Parse for replay arguments:
cxxopts::Options options("RMM Random Allocations Benchmark",
"Benchmarks random allocations within a size range.");
options.add_options()(
"p,profile", "Profiling mode: run once", cxxopts::value<bool>()->default_value("false"));
options.add_options()("r,resource",
"Type of device_memory_resource",
cxxopts::value<std::string>()->default_value("pool"));
options.add_options()("n,numallocs",
"Number of allocations (default of 0 tests a range)",
cxxopts::value<int>()->default_value("1000"));
options.add_options()("m,maxsize",
"Maximum allocation size (default of 0 tests a range)",
cxxopts::value<int>()->default_value("4096"));
auto args = options.parse(argc, argv);
num_allocations = args["numallocs"].as<int>();
max_size = args["maxsize"].as<int>();
if (args.count("profile") > 0) {
std::map<std::string, MRFactoryFunc> const funcs({{"arena", &make_arena},
{"binning", &make_binning},
{"cuda", &make_cuda},
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
{"cuda_async", &make_cuda_async},
#endif
{"pool", &make_pool}});
auto resource = args["resource"].as<std::string>();
std::cout << "Profiling " << resource << " with " << num_allocations << " allocations of max "
<< max_size << "B\n";
profile_random_allocations(funcs.at(resource), num_allocations, max_size);
std::cout << "Finished\n";
} else {
if (args.count("numallocs") == 0) { // if zero reset to -1 so we benchmark over a range
num_allocations = -1;
}
if (args.count("maxsize") == 0) { // if zero reset to -1 so we benchmark over a range
max_size = -1;
}
if (args.count("resource") > 0) {
std::string mr_name = args["resource"].as<std::string>();
declare_benchmark(mr_name);
} else {
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
std::vector<std::string> mrs{"pool", "binning", "arena", "cuda_async", "cuda"};
#else
std::vector<std::string> mrs{"pool", "binning", "arena", "cuda"};
#endif
std::for_each(
std::cbegin(mrs), std::cend(mrs), [](auto const& mr) { declare_benchmark(mr); });
}
::benchmark::RunSpecifiedBenchmarks();
}
} catch (std::exception const& e) {
std::cout << "Exception caught: " << e.what() << std::endl;
}
return 0;
}
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/device_uvector/device_uvector_bench.cu
|
/*
* Copyright (c) 2020-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex ess or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../synchronization/synchronization.hpp"
#include <rmm/cuda_stream.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/mr/device/cuda_async_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <benchmark/benchmark.h>
#include <cuda_runtime_api.h>
#include <thrust/device_vector.h>
#include <thrust/memory.h>
#include <cstdio>
#include <type_traits>
void BM_UvectorSizeConstruction(benchmark::State& state)
{
rmm::mr::cuda_memory_resource cuda_mr{};
rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> mr{&cuda_mr};
rmm::mr::set_current_device_resource(&mr);
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
rmm::device_uvector<std::int32_t> vec(state.range(0), rmm::cuda_stream_view{});
cudaDeviceSynchronize();
}
state.SetItemsProcessed(static_cast<std::int64_t>(state.iterations()));
rmm::mr::set_current_device_resource(nullptr);
}
BENCHMARK(BM_UvectorSizeConstruction)
->RangeMultiplier(10) // NOLINT
->Range(10'000, 1'000'000'000) // NOLINT
->Unit(benchmark::kMicrosecond);
void BM_ThrustVectorSizeConstruction(benchmark::State& state)
{
rmm::mr::cuda_memory_resource cuda_mr{};
rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> mr{&cuda_mr};
rmm::mr::set_current_device_resource(&mr);
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
rmm::device_vector<std::int32_t> vec(state.range(0));
cudaDeviceSynchronize();
}
state.SetItemsProcessed(static_cast<std::int64_t>(state.iterations()));
rmm::mr::set_current_device_resource(nullptr);
}
BENCHMARK(BM_ThrustVectorSizeConstruction)
->RangeMultiplier(10) // NOLINT
->Range(10'000, 1'000'000'000) // NOLINT
->Unit(benchmark::kMicrosecond);
// simple kernel used to test concurrent execution.
__global__ void kernel(int const* input, int* output, std::size_t num)
{
for (auto i = blockDim.x * blockIdx.x + threadIdx.x; i < num; i += gridDim.x * blockDim.x) {
output[i] = input[i] * input[i];
}
}
using thrust_vector = thrust::device_vector<int32_t>;
using rmm_vector = rmm::device_vector<int32_t>;
using rmm_uvector = rmm::device_uvector<int32_t>;
template <typename Vector>
Vector make_vector(std::int64_t num_elements, rmm::cuda_stream_view stream, bool zero_init = false)
{
static_assert(std::is_same_v<Vector, thrust_vector> or std::is_same_v<Vector, rmm_vector> or
std::is_same_v<Vector, rmm_uvector>,
"unsupported vector type");
if constexpr (std::is_same_v<Vector, thrust_vector>) {
return Vector(num_elements, 0);
} else if constexpr (std::is_same_v<Vector, rmm_vector>) {
return Vector(num_elements, 0, rmm::mr::thrust_allocator<std::int32_t>(stream));
} else if constexpr (std::is_same_v<Vector, rmm_uvector>) {
auto vec = Vector(num_elements, stream);
if (zero_init) {
cudaMemsetAsync(vec.data(), 0, num_elements * sizeof(std::int32_t), stream.value());
}
return vec;
}
}
template <typename Vector>
int32_t* vector_data(Vector& vec)
{
return thrust::raw_pointer_cast(vec.data());
}
template <typename Vector>
void vector_workflow(std::size_t num_elements,
std::int64_t num_blocks,
std::int64_t block_size,
rmm::cuda_stream const& input_stream,
std::vector<rmm::cuda_stream> const& streams)
{
auto input = make_vector<Vector>(num_elements, input_stream, true);
input_stream.synchronize();
for (rmm::cuda_stream_view stream : streams) {
auto output = make_vector<Vector>(num_elements, stream);
kernel<<<num_blocks, block_size, 0, stream.value()>>>(
vector_data(input), vector_data(output), num_elements);
}
for (rmm::cuda_stream_view stream : streams) {
stream.synchronize();
}
}
template <typename Vector>
void BM_VectorWorkflow(benchmark::State& state)
{
rmm::mr::cuda_async_memory_resource cuda_async_mr{};
rmm::mr::set_current_device_resource(&cuda_async_mr);
rmm::cuda_stream input_stream;
std::vector<rmm::cuda_stream> streams(4);
auto const num_elements = state.range(0);
auto constexpr block_size = 256;
auto constexpr num_blocks = 16;
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
cuda_event_timer timer(state, true, input_stream); // flush_l2_cache = true
vector_workflow<Vector>(num_elements, num_blocks, block_size, input_stream, streams);
}
auto constexpr num_accesses = 9;
auto const bytes = num_elements * sizeof(std::int32_t) * num_accesses;
state.SetBytesProcessed(static_cast<std::int64_t>(state.iterations() * bytes));
rmm::mr::set_current_device_resource(nullptr);
}
BENCHMARK_TEMPLATE(BM_VectorWorkflow, thrust_vector) // NOLINT
->RangeMultiplier(10) // NOLINT
->Range(100'000, 100'000'000) // NOLINT
->Unit(benchmark::kMicrosecond)
->UseManualTime();
// The only difference here is that `rmm::device_vector` uses `rmm::current_device_resource()`
// for allocation while `thrust::device_vector` uses cudaMalloc/cudaFree. In the benchmarks we use
// `cuda_async_memory_resource`, which is faster.
BENCHMARK_TEMPLATE(BM_VectorWorkflow, rmm_vector) // NOLINT
->RangeMultiplier(10) // NOLINT
->Range(100'000, 100'000'000) // NOLINT
->Unit(benchmark::kMicrosecond)
->UseManualTime();
BENCHMARK_TEMPLATE(BM_VectorWorkflow, rmm_uvector) // NOLINT
->RangeMultiplier(10) // NOLINT
->Range(100'000, 100'000'000) // NOLINT
->Unit(benchmark::kMicrosecond)
->UseManualTime();
BENCHMARK_MAIN();
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/synchronization/synchronization.cpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "synchronization.hpp"
#include <rmm/device_buffer.hpp>
#ifdef NDEBUG
#define RMM_CUDA_ASSERT_OK(expr) expr
#else
#define RMM_CUDA_ASSERT_OK(expr) \
do { \
cudaError_t const status = (expr); \
assert(cudaSuccess == status); \
} while (0);
#endif
cuda_event_timer::cuda_event_timer(benchmark::State& state,
bool flush_l2_cache,
rmm::cuda_stream_view stream)
: stream(stream), p_state(&state)
{
// flush all of L2$
if (flush_l2_cache) {
int current_device = 0;
RMM_CUDA_TRY(cudaGetDevice(¤t_device));
int l2_cache_bytes = 0;
RMM_CUDA_TRY(cudaDeviceGetAttribute(&l2_cache_bytes, cudaDevAttrL2CacheSize, current_device));
if (l2_cache_bytes > 0) {
const int memset_value = 0;
rmm::device_buffer l2_cache_buffer(l2_cache_bytes, stream);
RMM_CUDA_TRY(
cudaMemsetAsync(l2_cache_buffer.data(), memset_value, l2_cache_bytes, stream.value()));
}
}
RMM_CUDA_TRY(cudaEventCreate(&start));
RMM_CUDA_TRY(cudaEventCreate(&stop));
RMM_CUDA_TRY(cudaEventRecord(start, stream.value()));
}
cuda_event_timer::~cuda_event_timer()
{
RMM_CUDA_ASSERT_OK(cudaEventRecord(stop, stream.value()));
RMM_CUDA_ASSERT_OK(cudaEventSynchronize(stop));
float milliseconds = 0.0F;
RMM_CUDA_ASSERT_OK(cudaEventElapsedTime(&milliseconds, start, stop));
const auto to_milliseconds{1.0F / 1000};
p_state->SetIterationTime(milliseconds * to_milliseconds);
RMM_CUDA_ASSERT_OK(cudaEventDestroy(start));
RMM_CUDA_ASSERT_OK(cudaEventDestroy(stop));
}
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/synchronization/synchronization.hpp
|
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file synchronization.hpp
* @brief This is the header file for `cuda_event_timer`.
*/
/**
* @brief This class serves as a wrapper for using `cudaEvent_t` as the user
* defined timer within the framework of google benchmark
* (https://github.com/google/benchmark).
*
* It is built on top of the idea of Resource acquisition is initialization
* (RAII). In the following we show a minimal example of how to use this class.
#include <benchmark/benchmark.h>
static void sample_cuda_benchmark(benchmark::State& state) {
for (auto _ : state){
cudaStream_t stream = 0;
// Create (Construct) an object of this class. You HAVE to pass in the
// benchmark::State object you are using. It measures the time from its
// creation to its destruction that is spent on the specified CUDA stream.
// It also clears the L2 cache by cudaMemset'ing a device buffer that is of
// the size of the L2 cache (if flush_l2_cache is set to true and there is
// an L2 cache on the current device).
cuda_event_timer raii(state, true, stream); // flush_l2_cache = true
// Now perform the operations that is to be benchmarked
sample_kernel<<<1, 256, 0, stream>>>(); // Possibly launching a CUDA kernel
}
}
// Register the function as a benchmark. You will need to set the `UseManualTime()`
// flag in order to use the timer embedded in this class.
BENCHMARK(sample_cuda_benchmark)->UseManualTime();
*/
#pragma once
#include <rmm/cuda_stream_view.hpp>
// Google Benchmark library
#include <benchmark/benchmark.h>
#include <cuda_runtime_api.h>
class cuda_event_timer {
public:
/**
* @brief This c'tor clears the L2$ by cudaMemset'ing a buffer of L2$ size
* and starts the timer.
*
* @param[in,out] state This is the benchmark::State whose timer we are going
* to update.
* @param[in] flush_l2_cache whether or not to flush the L2 cache before
* every iteration.
* @param[in] stream The CUDA stream we are measuring time on.
*/
cuda_event_timer(benchmark::State& state,
bool flush_l2_cache,
rmm::cuda_stream_view stream = rmm::cuda_stream_default);
// The user will HAVE to provide a benchmark::State object to set
// the timer so we disable the default c'tor.
cuda_event_timer() = delete;
// The d'tor stops the timer and performs a synchroniazation.
// Time of the benchmark::State object provided to the c'tor
// will be set to the value given by `cudaEventElapsedTime`.
~cuda_event_timer();
// disable copy and move
cuda_event_timer(cuda_event_timer const&) = delete;
cuda_event_timer& operator=(cuda_event_timer const&) = delete;
cuda_event_timer(cuda_event_timer&&) = delete;
cuda_event_timer& operator=(cuda_event_timer&&) = delete;
private:
cudaEvent_t start{};
cudaEvent_t stop{};
rmm::cuda_stream_view stream{};
benchmark::State* p_state{};
};
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/multi_stream_allocations/multi_stream_allocations_bench.cu
|
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex ess or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/utilities/cxxopts.hpp>
#include <rmm/cuda_stream.hpp>
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/arena_memory_resource.hpp>
#include <rmm/mr/device/binning_memory_resource.hpp>
#include <rmm/mr/device/cuda_async_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <cuda_runtime_api.h>
#include <benchmark/benchmark.h>
#include <cstddef>
__global__ void compute_bound_kernel(int64_t* out)
{
clock_t clock_begin = clock64();
clock_t clock_current = clock_begin;
auto const million{1'000'000};
if (threadIdx.x == 0) { // NOLINT(readability-static-accessed-through-instance)
while (clock_current - clock_begin < million) {
clock_current = clock64();
}
}
*out = static_cast<int64_t>(clock_current);
}
using MRFactoryFunc = std::function<std::shared_ptr<rmm::mr::device_memory_resource>()>;
static void run_prewarm(rmm::cuda_stream_pool& stream_pool, rmm::mr::device_memory_resource* mr)
{
auto buffers = std::vector<rmm::device_uvector<int64_t>>();
for (int32_t i = 0; i < stream_pool.get_pool_size(); i++) {
auto stream = stream_pool.get_stream(i);
buffers.emplace_back(rmm::device_uvector<int64_t>(1, stream, mr));
}
}
static void run_test(std::size_t num_kernels,
rmm::cuda_stream_pool& stream_pool,
rmm::mr::device_memory_resource* mr)
{
for (int32_t i = 0; i < num_kernels; i++) {
auto stream = stream_pool.get_stream(i);
auto buffer = rmm::device_uvector<int64_t>(1, stream, mr);
compute_bound_kernel<<<1, 1, 0, stream.value()>>>(buffer.data());
}
}
static void BM_MultiStreamAllocations(benchmark::State& state, MRFactoryFunc const& factory)
{
auto mr = factory();
rmm::mr::set_current_device_resource(mr.get());
auto num_streams = state.range(0);
auto num_kernels = state.range(1);
bool do_prewarm = state.range(2) != 0;
auto stream_pool = rmm::cuda_stream_pool(num_streams);
if (do_prewarm) { run_prewarm(stream_pool, mr.get()); }
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
run_test(num_kernels, stream_pool, mr.get());
cudaDeviceSynchronize();
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations() * num_kernels));
rmm::mr::set_current_device_resource(nullptr);
}
inline auto make_cuda() { return std::make_shared<rmm::mr::cuda_memory_resource>(); }
inline auto make_cuda_async() { return std::make_shared<rmm::mr::cuda_async_memory_resource>(); }
inline auto make_pool()
{
return rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(make_cuda());
}
inline auto make_arena()
{
return rmm::mr::make_owning_wrapper<rmm::mr::arena_memory_resource>(make_cuda());
}
inline auto make_binning()
{
auto pool = make_pool();
// Add a binning_memory_resource with fixed-size bins of sizes 256, 512, 1024, 2048 and 4096KiB
// Larger allocations will use the pool resource
constexpr auto min_bin_pow2{18};
constexpr auto max_bin_pow2{22};
auto mr = rmm::mr::make_owning_wrapper<rmm::mr::binning_memory_resource>(
pool, min_bin_pow2, max_bin_pow2);
return mr;
}
static void benchmark_range(benchmark::internal::Benchmark* bench)
{
bench //
->RangeMultiplier(2)
->Ranges({{1, 4}, {4, 4}, {false, true}})
->Unit(benchmark::kMicrosecond);
}
MRFactoryFunc get_mr_factory(std::string const& resource_name)
{
if (resource_name == "cuda") { return &make_cuda; }
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
if (resource_name == "cuda_async") { return &make_cuda_async; }
#endif
if (resource_name == "pool") { return &make_pool; }
if (resource_name == "arena") { return &make_arena; }
if (resource_name == "binning") { return &make_binning; }
std::cout << "Error: invalid memory_resource name: " << resource_name << std::endl;
RMM_FAIL();
}
void declare_benchmark(std::string const& name)
{
if (name == "cuda") {
BENCHMARK_CAPTURE(BM_MultiStreamAllocations, cuda, &make_cuda) //
->Apply(benchmark_range);
return;
}
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
if (name == "cuda_async") {
BENCHMARK_CAPTURE(BM_MultiStreamAllocations, cuda_async, &make_cuda_async) //
->Apply(benchmark_range);
return;
}
#endif
if (name == "pool") {
BENCHMARK_CAPTURE(BM_MultiStreamAllocations, pool_mr, &make_pool) //
->Apply(benchmark_range);
return;
}
if (name == "arena") {
BENCHMARK_CAPTURE(BM_MultiStreamAllocations, arena, &make_arena) //
->Apply(benchmark_range);
return;
}
if (name == "binning") {
BENCHMARK_CAPTURE(BM_MultiStreamAllocations, binning, &make_binning) //
->Apply(benchmark_range);
return;
}
std::cout << "Error: invalid memory_resource name: " << name << std::endl;
}
// NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
void run_profile(std::string const& resource_name, int kernel_count, int stream_count, bool prewarm)
{
auto mr_factory = get_mr_factory(resource_name);
auto mr = mr_factory();
auto stream_pool = rmm::cuda_stream_pool(stream_count);
if (prewarm) { run_prewarm(stream_pool, mr.get()); }
run_test(kernel_count, stream_pool, mr.get());
}
int main(int argc, char** argv)
{
try {
::benchmark::Initialize(&argc, argv);
// Parse for replay arguments:
cxxopts::Options options(
"RMM Multi Stream Allocations Benchmark",
"Benchmarks interleaving temporary allocations with compute-bound kernels.");
options.add_options()( //
"p,profile",
"Profiling mode: run once",
cxxopts::value<bool>()->default_value("false"));
options.add_options()( //
"r,resource",
"Type of device_memory_resource",
cxxopts::value<std::string>()->default_value("pool"));
options.add_options()( //
"k,kernels",
"Number of kernels to run: (default: 8)",
cxxopts::value<int>()->default_value("8"));
options.add_options()( //
"s,streams",
"Number of streams in stream pool (default: 8)",
cxxopts::value<int>()->default_value("8"));
options.add_options()( //
"w,warm",
"Ensure each stream has enough memory to satisfy allocations.",
cxxopts::value<bool>()->default_value("false"));
auto args = options.parse(argc, argv);
if (args.count("profile") > 0) {
auto resource_name = args["resource"].as<std::string>();
auto num_kernels = args["kernels"].as<int>();
auto num_streams = args["streams"].as<int>();
auto prewarm = args["warm"].as<bool>();
try {
run_profile(resource_name, num_kernels, num_streams, prewarm);
} catch (std::exception const& e) {
std::cout << "Exception caught: " << e.what() << std::endl;
}
} else {
auto resource_names = std::vector<std::string>();
if (args.count("resource") > 0) {
resource_names.emplace_back(args["resource"].as<std::string>());
} else {
resource_names.emplace_back("cuda");
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
resource_names.emplace_back("cuda_async");
#endif
resource_names.emplace_back("pool");
resource_names.emplace_back("arena");
resource_names.emplace_back("binning");
}
for (auto& resource_name : resource_names) {
declare_benchmark(resource_name);
}
::benchmark::RunSpecifiedBenchmarks();
}
} catch (std::exception const& e) {
std::cout << "Exception caught: " << e.what() << std::endl;
}
return 0;
}
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/cuda_stream_pool/cuda_stream_pool_bench.cpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex ess or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/cuda_stream_pool.hpp>
#include <rmm/detail/error.hpp>
#include <cuda_runtime_api.h>
#include <benchmark/benchmark.h>
#include <stdexcept>
static void BM_StreamPoolGetStream(benchmark::State& state)
{
rmm::cuda_stream_pool stream_pool{};
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
auto stream = stream_pool.get_stream();
cudaStreamQuery(stream.value());
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_StreamPoolGetStream)->Unit(benchmark::kMicrosecond);
static void BM_CudaStreamClass(benchmark::State& state)
{
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
auto stream = rmm::cuda_stream{};
cudaStreamQuery(stream.view().value());
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_CudaStreamClass)->Unit(benchmark::kMicrosecond);
BENCHMARK_MAIN();
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/utilities/simulated_memory_resource.hpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <cuda_runtime_api.h>
namespace rmm::mr {
/**
* @brief A device memory resource that simulates a fix-sized GPU.
*
* Only allocation calls are simulated. New memory is allocated sequentially in monotonically
* increasing address based on the requested size, until the predetermined size is exceeded.
*
* Deallocation calls are ignored.
*/
class simulated_memory_resource final : public device_memory_resource {
public:
/**
* @brief Construct a `simulated_memory_resource`.
*
* @param memory_size_bytes The size of the memory to simulate.
*/
explicit simulated_memory_resource(std::size_t memory_size_bytes)
: begin_{reinterpret_cast<char*>(0x100)}, // NOLINT
end_{reinterpret_cast<char*>(begin_ + memory_size_bytes)} // NOLINT
{
}
~simulated_memory_resource() override = default;
// Disable copy (and move) semantics.
simulated_memory_resource(simulated_memory_resource const&) = delete;
simulated_memory_resource& operator=(simulated_memory_resource const&) = delete;
simulated_memory_resource(simulated_memory_resource&&) = delete;
simulated_memory_resource& operator=(simulated_memory_resource&&) = delete;
/**
* @brief Query whether the resource supports use of non-null CUDA streams for
* allocation/deallocation.
*
* @returns bool false
*/
[[nodiscard]] bool supports_streams() const noexcept override { return false; }
/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return false
*/
[[nodiscard]] bool supports_get_mem_info() const noexcept override { return false; }
private:
/**
* @brief Allocates memory of size at least `bytes`.
*
* @note Stream argument is ignored
*
* @throws rmm::bad_alloc if the requested allocation could not be fulfilled
*
* @param bytes The size, in bytes, of the allocation
* @return void* Pointer to the newly allocated memory
*/
void* do_allocate(std::size_t bytes, cuda_stream_view) override
{
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
RMM_EXPECTS(begin_ + bytes <= end_, "Simulated memory size exceeded", rmm::bad_alloc);
auto* ptr = static_cast<void*>(begin_);
begin_ += bytes; // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
return ptr;
}
/**
* @brief Deallocate memory pointed to by `p`.
*
* @note This call is ignored.
*
* @param ptr Pointer to be deallocated
*/
void do_deallocate(void* ptr, std::size_t, cuda_stream_view) override {}
/**
* @brief Get free and available memory for memory resource.
*
* @param stream to execute on.
* @return std::pair containing free_size and total_size of memory.
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
cuda_stream_view stream) const override
{
return std::make_pair(0, 0);
}
char* begin_{};
char* end_{};
};
} // namespace rmm::mr
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/utilities/cxxopts.hpp
|
/*
Copyright (c) 2014, 2015, 2016, 2017 Jarryd Beck
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef CXXOPTS_HPP_INCLUDED
#define CXXOPTS_HPP_INCLUDED
#include <cctype>
#include <cstring>
#include <exception>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <regex>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#ifdef __cpp_lib_optional
#include <optional>
#define CXXOPTS_HAS_OPTIONAL
#endif
#ifndef CXXOPTS_VECTOR_DELIMITER
#define CXXOPTS_VECTOR_DELIMITER ','
#endif
#define CXXOPTS__VERSION_MAJOR 2
#define CXXOPTS__VERSION_MINOR 2
#define CXXOPTS__VERSION_PATCH 0
namespace cxxopts {
static constexpr struct {
uint8_t major, minor, patch;
} version = {CXXOPTS__VERSION_MAJOR, CXXOPTS__VERSION_MINOR, CXXOPTS__VERSION_PATCH};
} // namespace cxxopts
// when we ask cxxopts to use Unicode, help strings are processed using ICU,
// which results in the correct lengths being computed for strings when they
// are formatted for the help output
// it is necessary to make sure that <unicode/unistr.h> can be found by the
// compiler, and that icu-uc is linked in to the binary.
#ifdef CXXOPTS_USE_UNICODE
#include <unicode/unistr.h>
namespace cxxopts {
typedef icu::UnicodeString String;
inline String toLocalString(std::string s) { return icu::UnicodeString::fromUTF8(std::move(s)); }
class UnicodeStringIterator : public std::iterator<std::forward_iterator_tag, int32_t> {
public:
UnicodeStringIterator(const icu::UnicodeString* string, int32_t pos) : s(string), i(pos) {}
value_type operator*() const { return s->char32At(i); }
bool operator==(const UnicodeStringIterator& rhs) const { return s == rhs.s && i == rhs.i; }
bool operator!=(const UnicodeStringIterator& rhs) const { return !(*this == rhs); }
UnicodeStringIterator& operator++()
{
++i;
return *this;
}
UnicodeStringIterator operator+(int32_t v) { return UnicodeStringIterator(s, i + v); }
private:
const icu::UnicodeString* s;
int32_t i;
};
inline String& stringAppend(String& s, String a) { return s.append(std::move(a)); }
inline String& stringAppend(String& s, int n, UChar32 c)
{
for (int i = 0; i != n; ++i) {
s.append(c);
}
return s;
}
template <typename Iterator>
String& stringAppend(String& s, Iterator begin, Iterator end)
{
while (begin != end) {
s.append(*begin);
++begin;
}
return s;
}
inline size_t stringLength(const String& s) { return s.length(); }
inline std::string toUTF8String(const String& s)
{
std::string result;
s.toUTF8String(result);
return result;
}
inline bool empty(const String& s) { return s.isEmpty(); }
} // namespace cxxopts
namespace std {
inline cxxopts::UnicodeStringIterator begin(const icu::UnicodeString& s)
{
return cxxopts::UnicodeStringIterator(&s, 0);
}
inline cxxopts::UnicodeStringIterator end(const icu::UnicodeString& s)
{
return cxxopts::UnicodeStringIterator(&s, s.length());
}
} // namespace std
// ifdef CXXOPTS_USE_UNICODE
#else
namespace cxxopts {
typedef std::string String;
template <typename T>
T toLocalString(T&& t)
{
return std::forward<T>(t);
}
inline size_t stringLength(const String& s) { return s.length(); }
inline String& stringAppend(String& s, String a) { return s.append(std::move(a)); }
inline String& stringAppend(String& s, size_t n, char c) { return s.append(n, c); }
template <typename Iterator>
String& stringAppend(String& s, Iterator begin, Iterator end)
{
return s.append(begin, end);
}
template <typename T>
std::string toUTF8String(T&& t)
{
return std::forward<T>(t);
}
inline bool empty(const std::string& s) { return s.empty(); }
} // namespace cxxopts
// ifdef CXXOPTS_USE_UNICODE
#endif
namespace cxxopts {
namespace {
#ifdef _WIN32
const std::string LQUOTE("\'");
const std::string RQUOTE("\'");
#else
const std::string LQUOTE("‘");
const std::string RQUOTE("’");
#endif
} // namespace
class Value : public std::enable_shared_from_this<Value> {
public:
virtual ~Value() = default;
virtual std::shared_ptr<Value> clone() const = 0;
virtual void parse(const std::string& text) const = 0;
virtual void parse() const = 0;
virtual bool has_default() const = 0;
virtual bool is_container() const = 0;
virtual bool has_implicit() const = 0;
virtual std::string get_default_value() const = 0;
virtual std::string get_implicit_value() const = 0;
virtual std::shared_ptr<Value> default_value(const std::string& value) = 0;
virtual std::shared_ptr<Value> implicit_value(const std::string& value) = 0;
virtual std::shared_ptr<Value> no_implicit_value() = 0;
virtual bool is_boolean() const = 0;
};
class OptionException : public std::exception {
public:
OptionException(const std::string& message) : m_message(message) {}
virtual const char* what() const noexcept { return m_message.c_str(); }
private:
std::string m_message;
};
class OptionSpecException : public OptionException {
public:
OptionSpecException(const std::string& message) : OptionException(message) {}
};
class OptionParseException : public OptionException {
public:
OptionParseException(const std::string& message) : OptionException(message) {}
};
class option_exists_error : public OptionSpecException {
public:
option_exists_error(const std::string& option)
: OptionSpecException("Option " + LQUOTE + option + RQUOTE + " already exists")
{
}
};
class invalid_option_format_error : public OptionSpecException {
public:
invalid_option_format_error(const std::string& format)
: OptionSpecException("Invalid option format " + LQUOTE + format + RQUOTE)
{
}
};
class option_syntax_exception : public OptionParseException {
public:
option_syntax_exception(const std::string& text)
: OptionParseException("Argument " + LQUOTE + text + RQUOTE +
" starts with a - but has incorrect syntax")
{
}
};
class option_not_exists_exception : public OptionParseException {
public:
option_not_exists_exception(const std::string& option)
: OptionParseException("Option " + LQUOTE + option + RQUOTE + " does not exist")
{
}
};
class missing_argument_exception : public OptionParseException {
public:
missing_argument_exception(const std::string& option)
: OptionParseException("Option " + LQUOTE + option + RQUOTE + " is missing an argument")
{
}
};
class option_requires_argument_exception : public OptionParseException {
public:
option_requires_argument_exception(const std::string& option)
: OptionParseException("Option " + LQUOTE + option + RQUOTE + " requires an argument")
{
}
};
class option_not_has_argument_exception : public OptionParseException {
public:
option_not_has_argument_exception(const std::string& option, const std::string& arg)
: OptionParseException("Option " + LQUOTE + option + RQUOTE +
" does not take an argument, but argument " + LQUOTE + arg + RQUOTE +
" given")
{
}
};
class option_not_present_exception : public OptionParseException {
public:
option_not_present_exception(const std::string& option)
: OptionParseException("Option " + LQUOTE + option + RQUOTE + " not present")
{
}
};
class argument_incorrect_type : public OptionParseException {
public:
argument_incorrect_type(const std::string& arg)
: OptionParseException("Argument " + LQUOTE + arg + RQUOTE + " failed to parse")
{
}
};
class option_required_exception : public OptionParseException {
public:
option_required_exception(const std::string& option)
: OptionParseException("Option " + LQUOTE + option + RQUOTE + " is required but not present")
{
}
};
template <typename T>
void throw_or_mimic(const std::string& text)
{
static_assert(std::is_base_of<std::exception, T>::value,
"throw_or_mimic only works on std::exception and "
"deriving classes");
#ifndef CXXOPTS_NO_EXCEPTIONS
// If CXXOPTS_NO_EXCEPTIONS is not defined, just throw
throw T{text};
#else
// Otherwise manually instantiate the exception, print what() to stderr,
// and abort
T exception{text};
std::cerr << exception.what() << std::endl;
std::cerr << "Aborting (exceptions disabled)..." << std::endl;
std::abort();
#endif
}
namespace values {
namespace {
std::basic_regex<char> integer_pattern("(-)?(0x)?([0-9a-zA-Z]+)|((0x)?0)");
std::basic_regex<char> truthy_pattern("(t|T)(rue)?|1");
std::basic_regex<char> falsy_pattern("(f|F)(alse)?|0");
} // namespace
namespace detail {
template <typename T, bool B>
struct SignedCheck;
template <typename T>
struct SignedCheck<T, true> {
template <typename U>
void operator()(bool negative, U u, const std::string& text)
{
if (negative) {
if (u > static_cast<U>((std::numeric_limits<T>::min)())) {
throw_or_mimic<argument_incorrect_type>(text);
}
} else {
if (u > static_cast<U>((std::numeric_limits<T>::max)())) {
throw_or_mimic<argument_incorrect_type>(text);
}
}
}
};
template <typename T>
struct SignedCheck<T, false> {
template <typename U>
void operator()(bool, U, const std::string&)
{
}
};
template <typename T, typename U>
void check_signed_range(bool negative, U value, const std::string& text)
{
SignedCheck<T, std::numeric_limits<T>::is_signed>()(negative, value, text);
}
} // namespace detail
template <typename R, typename T>
R checked_negate(T&& t, const std::string&, std::true_type)
{
// if we got to here, then `t` is a positive number that fits into
// `R`. So to avoid MSVC C4146, we first cast it to `R`.
// See https://github.com/jarro2783/cxxopts/issues/62 for more details.
return static_cast<R>(-static_cast<R>(t - 1) - 1);
}
template <typename R, typename T>
T checked_negate(T&& t, const std::string& text, std::false_type)
{
throw_or_mimic<argument_incorrect_type>(text);
return t;
}
template <typename T>
void integer_parser(const std::string& text, T& value)
{
std::smatch match;
std::regex_match(text, match, integer_pattern);
if (match.length() == 0) { throw_or_mimic<argument_incorrect_type>(text); }
if (match.length(4) > 0) {
value = 0;
return;
}
using US = typename std::make_unsigned<T>::type;
constexpr bool is_signed = std::numeric_limits<T>::is_signed;
const bool negative = match.length(1) > 0;
const uint8_t base = match.length(2) > 0 ? 16 : 10;
auto value_match = match[3];
US result = 0;
for (auto iter = value_match.first; iter != value_match.second; ++iter) {
US digit = 0;
if (*iter >= '0' && *iter <= '9') {
digit = static_cast<US>(*iter - '0');
} else if (base == 16 && *iter >= 'a' && *iter <= 'f') {
digit = static_cast<US>(*iter - 'a' + 10);
} else if (base == 16 && *iter >= 'A' && *iter <= 'F') {
digit = static_cast<US>(*iter - 'A' + 10);
} else {
throw_or_mimic<argument_incorrect_type>(text);
}
const US next = static_cast<US>(result * base + digit);
if (result > next) { throw_or_mimic<argument_incorrect_type>(text); }
result = next;
}
detail::check_signed_range<T>(negative, result, text);
if (negative) {
value = checked_negate<T>(result, text, std::integral_constant<bool, is_signed>());
} else {
value = static_cast<T>(result);
}
}
template <typename T>
void stringstream_parser(const std::string& text, T& value)
{
std::stringstream in(text);
in >> value;
if (!in) { throw_or_mimic<argument_incorrect_type>(text); }
}
inline void parse_value(const std::string& text, uint8_t& value) { integer_parser(text, value); }
inline void parse_value(const std::string& text, int8_t& value) { integer_parser(text, value); }
inline void parse_value(const std::string& text, uint16_t& value) { integer_parser(text, value); }
inline void parse_value(const std::string& text, int16_t& value) { integer_parser(text, value); }
inline void parse_value(const std::string& text, uint32_t& value) { integer_parser(text, value); }
inline void parse_value(const std::string& text, int32_t& value) { integer_parser(text, value); }
inline void parse_value(const std::string& text, uint64_t& value) { integer_parser(text, value); }
inline void parse_value(const std::string& text, int64_t& value) { integer_parser(text, value); }
inline void parse_value(const std::string& text, bool& value)
{
std::smatch result;
std::regex_match(text, result, truthy_pattern);
if (!result.empty()) {
value = true;
return;
}
std::regex_match(text, result, falsy_pattern);
if (!result.empty()) {
value = false;
return;
}
throw_or_mimic<argument_incorrect_type>(text);
}
inline void parse_value(const std::string& text, std::string& value) { value = text; }
// The fallback parser. It uses the stringstream parser to parse all types
// that have not been overloaded explicitly. It has to be placed in the
// source code before all other more specialized templates.
template <typename T>
void parse_value(const std::string& text, T& value)
{
stringstream_parser(text, value);
}
template <typename T>
void parse_value(const std::string& text, std::vector<T>& value)
{
std::stringstream in(text);
std::string token;
while (in.eof() == false && std::getline(in, token, CXXOPTS_VECTOR_DELIMITER)) {
T v;
parse_value(token, v);
value.emplace_back(std::move(v));
}
}
#ifdef CXXOPTS_HAS_OPTIONAL
template <typename T>
void parse_value(const std::string& text, std::optional<T>& value)
{
T result;
parse_value(text, result);
value = std::move(result);
}
#endif
inline void parse_value(const std::string& text, char& c)
{
if (text.length() != 1) { throw_or_mimic<argument_incorrect_type>(text); }
c = text[0];
}
template <typename T>
struct type_is_container {
static constexpr bool value = false;
};
template <typename T>
struct type_is_container<std::vector<T>> {
static constexpr bool value = true;
};
template <typename T>
class abstract_value : public Value {
using Self = abstract_value<T>;
public:
abstract_value() : m_result(std::make_shared<T>()), m_store(m_result.get()) {}
abstract_value(T* t) : m_store(t) {}
virtual ~abstract_value() = default;
abstract_value(const abstract_value& rhs)
{
if (rhs.m_result) {
m_result = std::make_shared<T>();
m_store = m_result.get();
} else {
m_store = rhs.m_store;
}
m_default = rhs.m_default;
m_implicit = rhs.m_implicit;
m_default_value = rhs.m_default_value;
m_implicit_value = rhs.m_implicit_value;
}
void parse(const std::string& text) const { parse_value(text, *m_store); }
bool is_container() const { return type_is_container<T>::value; }
void parse() const { parse_value(m_default_value, *m_store); }
bool has_default() const { return m_default; }
bool has_implicit() const { return m_implicit; }
std::shared_ptr<Value> default_value(const std::string& value)
{
m_default = true;
m_default_value = value;
return shared_from_this();
}
std::shared_ptr<Value> implicit_value(const std::string& value)
{
m_implicit = true;
m_implicit_value = value;
return shared_from_this();
}
std::shared_ptr<Value> no_implicit_value()
{
m_implicit = false;
return shared_from_this();
}
std::string get_default_value() const { return m_default_value; }
std::string get_implicit_value() const { return m_implicit_value; }
bool is_boolean() const { return std::is_same<T, bool>::value; }
const T& get() const
{
if (m_store == nullptr) {
return *m_result;
} else {
return *m_store;
}
}
protected:
std::shared_ptr<T> m_result;
T* m_store;
bool m_default = false;
bool m_implicit = false;
std::string m_default_value;
std::string m_implicit_value;
};
template <typename T>
class standard_value : public abstract_value<T> {
public:
using abstract_value<T>::abstract_value;
std::shared_ptr<Value> clone() const { return std::make_shared<standard_value<T>>(*this); }
};
template <>
class standard_value<bool> : public abstract_value<bool> {
public:
~standard_value() = default;
standard_value() { set_default_and_implicit(); }
standard_value(bool* b) : abstract_value(b) { set_default_and_implicit(); }
std::shared_ptr<Value> clone() const { return std::make_shared<standard_value<bool>>(*this); }
private:
void set_default_and_implicit()
{
m_default = true;
m_default_value = "false";
m_implicit = true;
m_implicit_value = "true";
}
};
} // namespace values
template <typename T>
std::shared_ptr<Value> value()
{
return std::make_shared<values::standard_value<T>>();
}
template <typename T>
std::shared_ptr<Value> value(T& t)
{
return std::make_shared<values::standard_value<T>>(&t);
}
class OptionAdder;
class OptionDetails {
public:
OptionDetails(const std::string& short_,
const std::string& long_,
const String& desc,
std::shared_ptr<const Value> val)
: m_short(short_), m_long(long_), m_desc(desc), m_value(val), m_count(0)
{
}
OptionDetails(const OptionDetails& rhs) : m_desc(rhs.m_desc), m_count(rhs.m_count)
{
m_value = rhs.m_value->clone();
}
OptionDetails(OptionDetails&& rhs) = default;
const String& description() const { return m_desc; }
const Value& value() const { return *m_value; }
std::shared_ptr<Value> make_storage() const { return m_value->clone(); }
const std::string& short_name() const { return m_short; }
const std::string& long_name() const { return m_long; }
private:
std::string m_short;
std::string m_long;
String m_desc;
std::shared_ptr<const Value> m_value;
int m_count;
};
struct HelpOptionDetails {
std::string s;
std::string l;
String desc;
bool has_default;
std::string default_value;
bool has_implicit;
std::string implicit_value;
std::string arg_help;
bool is_container;
bool is_boolean;
};
struct HelpGroupDetails {
std::string name;
std::string description;
std::vector<HelpOptionDetails> options;
};
class OptionValue {
public:
void parse(std::shared_ptr<const OptionDetails> details, const std::string& text)
{
ensure_value(details);
++m_count;
m_value->parse(text);
}
void parse_default(std::shared_ptr<const OptionDetails> details)
{
ensure_value(details);
m_default = true;
m_value->parse();
}
size_t count() const noexcept { return m_count; }
// TODO: maybe default options should count towards the number of arguments
bool has_default() const noexcept { return m_default; }
template <typename T>
const T& as() const
{
if (m_value == nullptr) { throw_or_mimic<std::domain_error>("No value"); }
#ifdef CXXOPTS_NO_RTTI
return static_cast<const values::standard_value<T>&>(*m_value).get();
#else
return dynamic_cast<const values::standard_value<T>&>(*m_value).get();
#endif
}
private:
void ensure_value(std::shared_ptr<const OptionDetails> details)
{
if (m_value == nullptr) { m_value = details->make_storage(); }
}
std::shared_ptr<Value> m_value;
size_t m_count = 0;
bool m_default = false;
};
class KeyValue {
public:
KeyValue(std::string key_, std::string value_)
: m_key(std::move(key_)), m_value(std::move(value_))
{
}
const std::string& key() const { return m_key; }
const std::string& value() const { return m_value; }
template <typename T>
T as() const
{
T result;
values::parse_value(m_value, result);
return result;
}
private:
std::string m_key;
std::string m_value;
};
class ParseResult {
public:
ParseResult(
const std::shared_ptr<std::unordered_map<std::string, std::shared_ptr<OptionDetails>>>,
std::vector<std::string>,
bool allow_unrecognised,
int&,
char**&);
size_t count(const std::string& o) const
{
auto iter = m_options->find(o);
if (iter == m_options->end()) { return 0; }
auto riter = m_results.find(iter->second);
return riter->second.count();
}
const OptionValue& operator[](const std::string& option) const
{
auto iter = m_options->find(option);
if (iter == m_options->end()) { throw_or_mimic<option_not_present_exception>(option); }
auto riter = m_results.find(iter->second);
return riter->second;
}
const std::vector<KeyValue>& arguments() const { return m_sequential; }
private:
void parse(int& argc, char**& argv);
void add_to_option(const std::string& option, const std::string& arg);
bool consume_positional(std::string a);
void parse_option(std::shared_ptr<OptionDetails> value,
const std::string& name,
const std::string& arg = "");
void parse_default(std::shared_ptr<OptionDetails> details);
void checked_parse_arg(int argc,
char* argv[],
int& current,
std::shared_ptr<OptionDetails> value,
const std::string& name);
const std::shared_ptr<std::unordered_map<std::string, std::shared_ptr<OptionDetails>>> m_options;
std::vector<std::string> m_positional;
std::vector<std::string>::iterator m_next_positional;
std::unordered_set<std::string> m_positional_set;
std::unordered_map<std::shared_ptr<OptionDetails>, OptionValue> m_results;
bool m_allow_unrecognised;
std::vector<KeyValue> m_sequential;
};
struct Option {
Option(const std::string& opts,
const std::string& desc,
const std::shared_ptr<const Value>& value = ::cxxopts::value<bool>(),
const std::string& arg_help = "")
: opts_(opts), desc_(desc), value_(value), arg_help_(arg_help)
{
}
std::string opts_;
std::string desc_;
std::shared_ptr<const Value> value_;
std::string arg_help_;
};
class Options {
typedef std::unordered_map<std::string, std::shared_ptr<OptionDetails>> OptionMap;
public:
Options(std::string program, std::string help_string = "")
: m_program(std::move(program)),
m_help_string(toLocalString(std::move(help_string))),
m_custom_help("[OPTION...]"),
m_positional_help("positional parameters"),
m_show_positional(false),
m_allow_unrecognised(false),
m_options(std::make_shared<OptionMap>()),
m_next_positional(m_positional.end())
{
}
Options& positional_help(std::string help_text)
{
m_positional_help = std::move(help_text);
return *this;
}
Options& custom_help(std::string help_text)
{
m_custom_help = std::move(help_text);
return *this;
}
Options& show_positional_help()
{
m_show_positional = true;
return *this;
}
Options& allow_unrecognised_options()
{
m_allow_unrecognised = true;
return *this;
}
ParseResult parse(int& argc, char**& argv);
OptionAdder add_options(std::string group = "");
void add_options(const std::string& group, std::initializer_list<Option> options);
void add_option(const std::string& group, const Option& option);
void add_option(const std::string& group,
const std::string& s,
const std::string& l,
std::string desc,
std::shared_ptr<const Value> value,
std::string arg_help);
// parse positional arguments into the given option
void parse_positional(std::string option);
void parse_positional(std::vector<std::string> options);
void parse_positional(std::initializer_list<std::string> options);
template <typename Iterator>
void parse_positional(Iterator begin, Iterator end)
{
parse_positional(std::vector<std::string>{begin, end});
}
std::string help(const std::vector<std::string>& groups = {}) const;
const std::vector<std::string> groups() const;
const HelpGroupDetails& group_help(const std::string& group) const;
private:
void add_one_option(const std::string& option, std::shared_ptr<OptionDetails> details);
String help_one_group(const std::string& group) const;
void generate_group_help(String& result, const std::vector<std::string>& groups) const;
void generate_all_groups_help(String& result) const;
std::string m_program;
String m_help_string;
std::string m_custom_help;
std::string m_positional_help;
bool m_show_positional;
bool m_allow_unrecognised;
std::shared_ptr<OptionMap> m_options;
std::vector<std::string> m_positional;
std::vector<std::string>::iterator m_next_positional;
std::unordered_set<std::string> m_positional_set;
// mapping from groups to help options
std::map<std::string, HelpGroupDetails> m_help;
};
class OptionAdder {
public:
OptionAdder(Options& options, std::string group) : m_options(options), m_group(std::move(group))
{
}
OptionAdder& operator()(const std::string& opts,
const std::string& desc,
std::shared_ptr<const Value> value = ::cxxopts::value<bool>(),
std::string arg_help = "");
private:
Options& m_options;
std::string m_group;
};
namespace {
constexpr int OPTION_LONGEST = 30;
constexpr int OPTION_DESC_GAP = 2;
std::basic_regex<char> option_matcher("--([[:alnum:]][-_[:alnum:]]+)(=(.*))?|-([[:alnum:]]+)");
std::basic_regex<char> option_specifier("(([[:alnum:]]),)?[ ]*([[:alnum:]][-_[:alnum:]]*)?");
String format_option(const HelpOptionDetails& o)
{
auto& s = o.s;
auto& l = o.l;
String result = " ";
if (s.size() > 0) {
result += "-" + toLocalString(s) + ",";
} else {
result += " ";
}
if (l.size() > 0) { result += " --" + toLocalString(l); }
auto arg = o.arg_help.size() > 0 ? toLocalString(o.arg_help) : "arg";
if (!o.is_boolean) {
if (o.has_implicit) {
result += " [=" + arg + "(=" + toLocalString(o.implicit_value) + ")]";
} else {
result += " " + arg;
}
}
return result;
}
String format_description(const HelpOptionDetails& o, size_t start, size_t width)
{
auto desc = o.desc;
if (o.has_default && (!o.is_boolean || o.default_value != "false")) {
if (o.default_value != "") {
desc += toLocalString(" (default: " + o.default_value + ")");
} else {
desc += toLocalString(" (default: \"\")");
}
}
String result;
auto current = std::begin(desc);
auto startLine = current;
auto lastSpace = current;
auto size = size_t{};
while (current != std::end(desc)) {
if (*current == ' ') { lastSpace = current; }
if (*current == '\n') {
startLine = current + 1;
lastSpace = startLine;
} else if (size > width) {
if (lastSpace == startLine) {
stringAppend(result, startLine, current + 1);
stringAppend(result, "\n");
stringAppend(result, start, ' ');
startLine = current + 1;
lastSpace = startLine;
} else {
stringAppend(result, startLine, lastSpace);
stringAppend(result, "\n");
stringAppend(result, start, ' ');
startLine = lastSpace + 1;
lastSpace = startLine;
}
size = 0;
} else {
++size;
}
++current;
}
// append whatever is left
stringAppend(result, startLine, current);
return result;
}
} // namespace
inline ParseResult::ParseResult(
const std::shared_ptr<std::unordered_map<std::string, std::shared_ptr<OptionDetails>>> options,
std::vector<std::string> positional,
bool allow_unrecognised,
int& argc,
char**& argv)
: m_options(options),
m_positional(std::move(positional)),
m_next_positional(m_positional.begin()),
m_allow_unrecognised(allow_unrecognised)
{
parse(argc, argv);
}
inline void Options::add_options(const std::string& group, std::initializer_list<Option> options)
{
OptionAdder option_adder(*this, group);
for (const auto& option : options) {
option_adder(option.opts_, option.desc_, option.value_, option.arg_help_);
}
}
inline OptionAdder Options::add_options(std::string group)
{
return OptionAdder(*this, std::move(group));
}
inline OptionAdder& OptionAdder::operator()(const std::string& opts,
const std::string& desc,
std::shared_ptr<const Value> value,
std::string arg_help)
{
std::match_results<const char*> result;
std::regex_match(opts.c_str(), result, option_specifier);
if (result.empty()) { throw_or_mimic<invalid_option_format_error>(opts); }
const auto& short_match = result[2];
const auto& long_match = result[3];
if (!short_match.length() && !long_match.length()) {
throw_or_mimic<invalid_option_format_error>(opts);
} else if (long_match.length() == 1 && short_match.length()) {
throw_or_mimic<invalid_option_format_error>(opts);
}
auto option_names = [](const std::sub_match<const char*>& short_,
const std::sub_match<const char*>& long_) {
if (long_.length() == 1) {
return std::make_tuple(long_.str(), short_.str());
} else {
return std::make_tuple(short_.str(), long_.str());
}
}(short_match, long_match);
m_options.add_option(m_group,
std::get<0>(option_names),
std::get<1>(option_names),
desc,
value,
std::move(arg_help));
return *this;
}
inline void ParseResult::parse_default(std::shared_ptr<OptionDetails> details)
{
m_results[details].parse_default(details);
}
inline void ParseResult::parse_option(std::shared_ptr<OptionDetails> value,
const std::string& /*name*/,
const std::string& arg)
{
auto& result = m_results[value];
result.parse(value, arg);
m_sequential.emplace_back(value->long_name(), arg);
}
inline void ParseResult::checked_parse_arg(int argc,
char* argv[],
int& current,
std::shared_ptr<OptionDetails> value,
const std::string& name)
{
if (current + 1 >= argc) {
if (value->value().has_implicit()) {
parse_option(value, name, value->value().get_implicit_value());
} else {
throw_or_mimic<missing_argument_exception>(name);
}
} else {
if (value->value().has_implicit()) {
parse_option(value, name, value->value().get_implicit_value());
} else {
parse_option(value, name, argv[current + 1]);
++current;
}
}
}
inline void ParseResult::add_to_option(const std::string& option, const std::string& arg)
{
auto iter = m_options->find(option);
if (iter == m_options->end()) { throw_or_mimic<option_not_exists_exception>(option); }
parse_option(iter->second, option, arg);
}
inline bool ParseResult::consume_positional(std::string a)
{
while (m_next_positional != m_positional.end()) {
auto iter = m_options->find(*m_next_positional);
if (iter != m_options->end()) {
auto& result = m_results[iter->second];
if (!iter->second->value().is_container()) {
if (result.count() == 0) {
add_to_option(*m_next_positional, a);
++m_next_positional;
return true;
} else {
++m_next_positional;
continue;
}
} else {
add_to_option(*m_next_positional, a);
return true;
}
} else {
throw_or_mimic<option_not_exists_exception>(*m_next_positional);
}
}
return false;
}
inline void Options::parse_positional(std::string option)
{
parse_positional(std::vector<std::string>{std::move(option)});
}
inline void Options::parse_positional(std::vector<std::string> options)
{
m_positional = std::move(options);
m_next_positional = m_positional.begin();
m_positional_set.insert(m_positional.begin(), m_positional.end());
}
inline void Options::parse_positional(std::initializer_list<std::string> options)
{
parse_positional(std::vector<std::string>(std::move(options)));
}
inline ParseResult Options::parse(int& argc, char**& argv)
{
ParseResult result(m_options, m_positional, m_allow_unrecognised, argc, argv);
return result;
}
inline void ParseResult::parse(int& argc, char**& argv)
{
int current = 1;
int nextKeep = 1;
bool consume_remaining = false;
while (current != argc) {
if (strcmp(argv[current], "--") == 0) {
consume_remaining = true;
++current;
break;
}
std::match_results<const char*> result;
std::regex_match(argv[current], result, option_matcher);
if (result.empty()) {
// not a flag
// but if it starts with a `-`, then it's an error
if (argv[current][0] == '-' && argv[current][1] != '\0') {
if (!m_allow_unrecognised) { throw_or_mimic<option_syntax_exception>(argv[current]); }
}
// if true is returned here then it was consumed, otherwise it is
// ignored
if (consume_positional(argv[current])) {
} else {
argv[nextKeep] = argv[current];
++nextKeep;
}
// if we return from here then it was parsed successfully, so continue
} else {
// short or long option?
if (result[4].length() != 0) {
const std::string& s = result[4];
for (std::size_t i = 0; i != s.size(); ++i) {
std::string name(1, s[i]);
auto iter = m_options->find(name);
if (iter == m_options->end()) {
if (m_allow_unrecognised) {
continue;
} else {
// error
throw_or_mimic<option_not_exists_exception>(name);
}
}
auto value = iter->second;
if (i + 1 == s.size()) {
// it must be the last argument
checked_parse_arg(argc, argv, current, value, name);
} else if (value->value().has_implicit()) {
parse_option(value, name, value->value().get_implicit_value());
} else {
// error
throw_or_mimic<option_requires_argument_exception>(name);
}
}
} else if (result[1].length() != 0) {
const std::string& name = result[1];
auto iter = m_options->find(name);
if (iter == m_options->end()) {
if (m_allow_unrecognised) {
// keep unrecognised options in argument list, skip to next argument
argv[nextKeep] = argv[current];
++nextKeep;
++current;
continue;
} else {
// error
throw_or_mimic<option_not_exists_exception>(name);
}
}
auto opt = iter->second;
// equals provided for long option?
if (result[2].length() != 0) {
// parse the option given
parse_option(opt, name, result[3]);
} else {
// parse the next argument
checked_parse_arg(argc, argv, current, opt, name);
}
}
}
++current;
}
for (auto& opt : *m_options) {
auto& detail = opt.second;
auto& value = detail->value();
auto& store = m_results[detail];
if (value.has_default() && !store.count() && !store.has_default()) { parse_default(detail); }
}
if (consume_remaining) {
while (current < argc) {
if (!consume_positional(argv[current])) { break; }
++current;
}
// adjust argv for any that couldn't be swallowed
while (current != argc) {
argv[nextKeep] = argv[current];
++nextKeep;
++current;
}
}
argc = nextKeep;
}
inline void Options::add_option(const std::string& group, const Option& option)
{
add_options(group, {option});
}
inline void Options::add_option(const std::string& group,
const std::string& s,
const std::string& l,
std::string desc,
std::shared_ptr<const Value> value,
std::string arg_help)
{
auto stringDesc = toLocalString(std::move(desc));
auto option = std::make_shared<OptionDetails>(s, l, stringDesc, value);
if (s.size() > 0) { add_one_option(s, option); }
if (l.size() > 0) { add_one_option(l, option); }
// add the help details
auto& options = m_help[group];
options.options.emplace_back(HelpOptionDetails{s,
l,
stringDesc,
value->has_default(),
value->get_default_value(),
value->has_implicit(),
value->get_implicit_value(),
std::move(arg_help),
value->is_container(),
value->is_boolean()});
}
inline void Options::add_one_option(const std::string& option,
std::shared_ptr<OptionDetails> details)
{
auto in = m_options->emplace(option, details);
if (!in.second) { throw_or_mimic<option_exists_error>(option); }
}
inline String Options::help_one_group(const std::string& g) const
{
typedef std::vector<std::pair<String, String>> OptionHelp;
auto group = m_help.find(g);
if (group == m_help.end()) { return ""; }
OptionHelp format;
size_t longest = 0;
String result;
if (!g.empty()) { result += toLocalString(" " + g + " options:\n"); }
for (const auto& o : group->second.options) {
if (m_positional_set.find(o.l) != m_positional_set.end() && !m_show_positional) { continue; }
auto s = format_option(o);
longest = (std::max)(longest, stringLength(s));
format.push_back(std::make_pair(s, String()));
}
longest = (std::min)(longest, static_cast<size_t>(OPTION_LONGEST));
// widest allowed description
auto allowed = size_t{76} - longest - OPTION_DESC_GAP;
auto fiter = format.begin();
for (const auto& o : group->second.options) {
if (m_positional_set.find(o.l) != m_positional_set.end() && !m_show_positional) { continue; }
auto d = format_description(o, longest + OPTION_DESC_GAP, allowed);
result += fiter->first;
if (stringLength(fiter->first) > longest) {
result += '\n';
result += toLocalString(std::string(longest + OPTION_DESC_GAP, ' '));
} else {
result +=
toLocalString(std::string(longest + OPTION_DESC_GAP - stringLength(fiter->first), ' '));
}
result += d;
result += '\n';
++fiter;
}
return result;
}
inline void Options::generate_group_help(String& result,
const std::vector<std::string>& print_groups) const
{
for (size_t i = 0; i != print_groups.size(); ++i) {
const String& group_help_text = help_one_group(print_groups[i]);
if (empty(group_help_text)) { continue; }
result += group_help_text;
if (i < print_groups.size() - 1) { result += '\n'; }
}
}
inline void Options::generate_all_groups_help(String& result) const
{
std::vector<std::string> all_groups;
all_groups.reserve(m_help.size());
for (auto& group : m_help) {
all_groups.push_back(group.first);
}
generate_group_help(result, all_groups);
}
inline std::string Options::help(const std::vector<std::string>& help_groups) const
{
String result =
m_help_string + "\nUsage:\n " + toLocalString(m_program) + " " + toLocalString(m_custom_help);
if (m_positional.size() > 0 && m_positional_help.size() > 0) {
result += " " + toLocalString(m_positional_help);
}
result += "\n\n";
if (help_groups.size() == 0) {
generate_all_groups_help(result);
} else {
generate_group_help(result, help_groups);
}
return toUTF8String(result);
}
inline const std::vector<std::string> Options::groups() const
{
std::vector<std::string> g;
std::transform(
m_help.begin(),
m_help.end(),
std::back_inserter(g),
[](const std::map<std::string, HelpGroupDetails>::value_type& pair) { return pair.first; });
return g;
}
inline const HelpGroupDetails& Options::group_help(const std::string& group) const
{
return m_help.at(group);
}
} // namespace cxxopts
#endif // CXXOPTS_HPP_INCLUDED
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/utilities/log_parser.hpp
|
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex ess or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <chrono>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include "rapidcsv.h"
#include <cstdint>
#include <iomanip>
#include <limits>
#include <memory>
#include <numeric>
#include <sstream>
#include <stdexcept>
#include <string>
namespace rmm::detail {
enum class action { ALLOCATE, FREE, ALLOCATE_FAILURE };
/**
* @brief Represents an allocation event
*
*/
struct event {
event() = default;
event(event const&) = default;
event& operator=(event const&) = default;
event(event&&) noexcept = default;
event& operator=(event&&) noexcept = default;
~event() = default;
event(action act, std::size_t size, void const* ptr)
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
: act{act}, size{size}, pointer{reinterpret_cast<uintptr_t>(ptr)}
{
}
// NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
event(action act, std::size_t size, uintptr_t ptr) : act{act}, size{size}, pointer{ptr} {}
event(std::size_t tid,
action act,
std::size_t size, // NOLINT(bugprone-easily-swappable-parameters)
uintptr_t ptr,
uintptr_t stream,
std::size_t index)
: act{act}, size{size}, pointer{ptr}, thread_id{tid}, stream{stream}, index{index}
{
}
event(
std::size_t tid, action act, std::size_t size, void* ptr, uintptr_t stream, std::size_t index)
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-reinterpret-cast)
: event{tid, act, size, reinterpret_cast<uintptr_t>(ptr), stream, index}
{
}
friend std::ostream& operator<<(std::ostream& os, event const& evt);
action act{}; ///< Indicates if the event is an allocation or a free
std::size_t size{}; ///< The size of the memory allocated or freed
uintptr_t pointer{}; ///< The pointer returned from an allocation, or the
///< pointer freed
std::size_t thread_id{}; ///< ID of the thread that initiated the event
uintptr_t stream{}; ///< Numeric representation of the CUDA stream on which the event occurred
std::size_t index{}; ///< Original ordering index of the event
};
inline std::ostream& operator<<(std::ostream& os, event const& evt)
{
const auto* act_string{[&evt] {
switch (evt.act) {
case action::ALLOCATE: return "allocate";
case action::FREE: return "free";
default: return "allocate failure";
}
}()};
const auto format_width{9};
os << "Thread: " << evt.thread_id << std::setw(format_width) << act_string
<< " Size: " << std::setw(std::numeric_limits<std::size_t>::digits10) << evt.size
<< " Pointer: "
<< "0x" << std::hex << evt.pointer << std::dec << " Stream: " << evt.stream;
return os;
}
/**
* @brief Parse a log timestamp into a std::chrono::time_point
*
* @note currently unused. Seemed necessary for ordering but it appears the log currently
* is in timestamp order even for multithreaded logs.
* @note This function can be simplified with C++20 and later.
*
* @param str_time The input time in format "HH:MM:SS:us" where us is a 6 digits microseconds part
* of the current second. (This is the format rmm::mr::logging_resource_adaptor outputs)
* @return std::chrono::time_point<std::chrono::system_clock> Converted time point.
*/
inline std::chrono::time_point<std::chrono::system_clock> parse_time(std::string const& str_time)
{
std::size_t current = str_time.find(':');
std::size_t previous = 0;
int hours = std::stoi(str_time.substr(previous, current - previous));
previous = current;
current = str_time.find(':');
int minutes = std::stoi(str_time.substr(previous, current - previous));
previous = current;
current = str_time.find(':');
int seconds = std::stoi(str_time.substr(previous, current - previous));
int microseconds = std::stoi(str_time.substr(current + 1, str_time.length()));
auto const epoch_year{1970};
std::tm time{seconds, minutes, hours, 1, 0, epoch_year, 0, 0, 0};
auto timepoint = std::chrono::system_clock::from_time_t(std::mktime(&time));
timepoint += std::chrono::microseconds{microseconds};
return timepoint;
}
/**
* @brief Parses a RMM log file into a vector of events
*
* Parses a log file generated from `rmm::mr::logging_resource_adaptor` into a vector of `event`s.
* An `event` describes an allocation/deallocation event that occurred via the logging adaptor.
*
* @param filename Name of the RMM log file
* @return Vector of events from the contents of the log file
*/
inline std::vector<event> parse_csv(std::string const& filename)
{
rapidcsv::Document csv(filename, rapidcsv::LabelParams(0, -1));
std::vector<std::size_t> tids = csv.GetColumn<std::size_t>("Thread");
std::vector<std::string> actions = csv.GetColumn<std::string>("Action");
auto parse_pointer = [](std::string const& str, uintptr_t& ptr) {
auto const base{16};
ptr = std::stoll(str, nullptr, base);
};
std::vector<uintptr_t> pointers = csv.GetColumn<uintptr_t>("Pointer", parse_pointer);
std::vector<std::size_t> sizes = csv.GetColumn<std::size_t>("Size");
std::vector<uintptr_t> streams = csv.GetColumn<uintptr_t>("Stream");
auto const size_list = {tids.size(), actions.size(), pointers.size(), streams.size()};
RMM_EXPECTS(std::all_of(std::begin(size_list),
std::end(size_list),
[size = sizes.size()](auto val) { return val == size; }),
"Size mismatch in columns of parsed log.");
std::vector<event> events(sizes.size());
for (std::size_t i = 0; i < actions.size(); ++i) {
auto const& action = actions[i];
RMM_EXPECTS((action == "allocate") or (action == "allocate failure") or (action == "free"),
"Invalid action string.");
auto act{action::ALLOCATE_FAILURE};
if (action == "allocate") {
act = action::ALLOCATE;
} else if (action == "free") {
act = action::FREE;
}
events[i] = event{tids[i], act, sizes[i], pointers[i], streams[i], i};
}
return events;
}
} // namespace rmm::detail
| 0 |
rapidsai_public_repos/rmm/benchmarks
|
rapidsai_public_repos/rmm/benchmarks/utilities/rapidcsv.h
|
/*
* rapidcsv.h
*
* URL: https://github.com/d99kris/rapidcsv
* Version: 6.10
*
* Copyright (C) 2017-2020 Kristofer Berggren
* All rights reserved.
*
* rapidcsv is distributed under the BSD 3-Clause license, see LICENSE for details.
*
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#ifdef HAS_CODECVT
#include <codecvt>
#endif
#include <fstream>
#include <functional>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <typeinfo>
#include <vector>
#if defined(_MSC_VER)
#include <BaseTsd.h>
typedef SSIZE_T ssize_t;
#endif
namespace rapidcsv {
#if defined(_MSC_VER)
static const bool sPlatformHasCR = true;
#else
static const bool sPlatformHasCR = false;
#endif
/**
* @brief Datastructure holding parameters controlling how invalid numbers (including
* empty strings) should be handled.
*/
struct ConverterParams {
/**
* @brief Constructor
* @param pHasDefaultConverter specifies if conversion of non-numerical strings shall be
* converted to a default numerical value, instead of causing
* an exception to be thrown (default).
* @param pDefaultFloat floating-point default value to represent invalid numbers.
* @param pDefaultInteger integer default value to represent invalid numbers.
*/
explicit ConverterParams(
const bool pHasDefaultConverter = false,
const long double pDefaultFloat = std::numeric_limits<long double>::signaling_NaN(),
const long long pDefaultInteger = 0)
: mHasDefaultConverter(pHasDefaultConverter),
mDefaultFloat(pDefaultFloat),
mDefaultInteger(pDefaultInteger)
{
}
/**
* @brief specifies if conversion of non-numerical strings shall be converted to a default
* numerical value, instead of causing an exception to be thrown (default).
*/
bool mHasDefaultConverter;
/**
* @brief floating-point default value to represent invalid numbers.
*/
long double mDefaultFloat;
/**
* @brief integer default value to represent invalid numbers.
*/
long long mDefaultInteger;
};
/**
* @brief Exception thrown when attempting to access Document data in a datatype which
* is not supported by the Converter class.
*/
class no_converter : public std::exception {
/**
* @brief Provides details about the exception
* @returns an explanatory string
*/
virtual const char* what() const throw() { return "unsupported conversion datatype"; }
};
/**
* @brief Class providing conversion to/from numerical datatypes and strings. Only
* intended for rapidcsv internal usage, but exposed externally to allow
* specialization for custom datatype conversions.
*/
template <typename T>
class Converter {
public:
/**
* @brief Constructor
* @param pConverterParams specifies how conversion of non-numerical values to
* numerical datatype shall be handled.
*/
Converter(const ConverterParams& pConverterParams) : mConverterParams(pConverterParams) {}
/**
* @brief Converts numerical value to string representation.
* @param pVal numerical value
* @param pStr output string
*/
void ToStr(const T& pVal, std::string& pStr) const
{
if (typeid(T) == typeid(int) || typeid(T) == typeid(long) || typeid(T) == typeid(long long) ||
typeid(T) == typeid(unsigned) || typeid(T) == typeid(unsigned long) ||
typeid(T) == typeid(unsigned long long) || typeid(T) == typeid(float) ||
typeid(T) == typeid(double) || typeid(T) == typeid(long double) ||
typeid(T) == typeid(char)) {
std::ostringstream out;
out << pVal;
pStr = out.str();
} else {
throw no_converter();
}
}
/**
* @brief Converts string holding a numerical value to numerical datatype representation.
* @param pVal numerical value
* @param pStr output string
*/
void ToVal(const std::string& pStr, T& pVal) const
{
try {
if (typeid(T) == typeid(int)) {
pVal = static_cast<T>(std::stoi(pStr));
return;
} else if (typeid(T) == typeid(long)) {
pVal = static_cast<T>(std::stol(pStr));
return;
} else if (typeid(T) == typeid(long long)) {
pVal = static_cast<T>(std::stoll(pStr));
return;
} else if (typeid(T) == typeid(unsigned)) {
pVal = static_cast<T>(std::stoul(pStr));
return;
} else if (typeid(T) == typeid(unsigned long)) {
pVal = static_cast<T>(std::stoul(pStr));
return;
} else if (typeid(T) == typeid(unsigned long long)) {
pVal = static_cast<T>(std::stoull(pStr));
return;
}
} catch (...) {
if (!mConverterParams.mHasDefaultConverter) {
throw;
} else {
pVal = static_cast<T>(mConverterParams.mDefaultInteger);
return;
}
}
try {
if (typeid(T) == typeid(float)) {
pVal = static_cast<T>(std::stof(pStr));
return;
} else if (typeid(T) == typeid(double)) {
pVal = static_cast<T>(std::stod(pStr));
return;
} else if (typeid(T) == typeid(long double)) {
pVal = static_cast<T>(std::stold(pStr));
return;
}
} catch (...) {
if (!mConverterParams.mHasDefaultConverter) {
throw;
} else {
pVal = static_cast<T>(mConverterParams.mDefaultFloat);
return;
}
}
if (typeid(T) == typeid(char)) {
pVal = static_cast<T>(pStr[0]);
return;
} else {
throw no_converter();
}
}
private:
const ConverterParams& mConverterParams;
};
/**
* @brief Specialized implementation handling string to string conversion.
* @param pVal string
* @param pStr string
*/
template <>
inline void Converter<std::string>::ToStr(const std::string& pVal, std::string& pStr) const
{
pStr = pVal;
}
/**
* @brief Specialized implementation handling string to string conversion.
* @param pVal string
* @param pStr string
*/
template <>
inline void Converter<std::string>::ToVal(const std::string& pStr, std::string& pVal) const
{
pVal = pStr;
}
template <typename T>
using ConvFunc = std::function<void(const std::string& pStr, T& pVal)>;
/**
* @brief Datastructure holding parameters controlling which row and column should be
* treated as labels.
*/
struct LabelParams {
/**
* @brief Constructor
* @param pColumnNameIdx specifies the zero-based row index of the column labels, setting
* it to -1 prevents column lookup by label name, and gives access
* to all rows as document data.
* @param pRowNameIdx specifies the zero-based column index of the row labels, setting
* it to -1 prevents row lookup by label name, and gives access
* to all columns as document data.
*/
explicit LabelParams(const int pColumnNameIdx = 0, const int pRowNameIdx = 0)
: mColumnNameIdx(pColumnNameIdx), mRowNameIdx(pRowNameIdx)
{
}
/**
* @brief specifies the zero-based row index of the column labels.
*/
int mColumnNameIdx;
/**
* @brief specifies the zero-based column index of the row labels.
*/
int mRowNameIdx;
};
/**
* @brief Datastructure holding parameters controlling how the CSV data fields are separated.
*/
struct SeparatorParams {
/**
* @brief Constructor
* @param pSeparator specifies the column separator (default ',').
* @param pTrim specifies whether to trim leading and trailing spaces from
* cells read.
* @param pHasCR specifies whether a new document (i.e. not an existing document
* read) should use CR/LF instead of only LF (default is to use standard behavior of underlying
* platforms - CR/LF for Win, and LF for others).
* @param pQuotedLinebreaks specifies whether to allow line breaks in quoted text.
*/
explicit SeparatorParams(const char pSeparator = ',',
const bool pTrim = false,
const bool pHasCR = sPlatformHasCR,
const bool pQuotedLinebreaks = false)
: mSeparator(pSeparator), mTrim(pTrim), mHasCR(pHasCR), mQuotedLinebreaks(pQuotedLinebreaks)
{
}
/**
* @brief specifies the column separator.
*/
char mSeparator;
/**
* @brief specifies whether to trim leading and trailing spaces from cells read.
*/
bool mTrim;
/**
* @brief specifies whether new documents should use CR/LF instead of LF.
*/
bool mHasCR;
/**
* @brief specifies whether to allow line breaks in quoted text.
*/
bool mQuotedLinebreaks;
};
/**
* @brief Class representing a CSV document.
*/
class Document {
public:
/**
* @brief Constructor
* @param pPath specifies the path of an existing CSV-file to populate the
* Document data with.
* @param pLabelParams specifies which row and column should be treated as labels.
* @param pSeparatorParams specifies which field and row separators should be used.
* @param pConverterParams specifies how invalid numbers (including empty strings) should
* be handled.
*/
explicit Document(const std::string& pPath = std::string(),
const LabelParams& pLabelParams = LabelParams(),
const SeparatorParams& pSeparatorParams = SeparatorParams(),
const ConverterParams& pConverterParams = ConverterParams())
: mPath(pPath),
mLabelParams(pLabelParams),
mSeparatorParams(pSeparatorParams),
mConverterParams(pConverterParams)
{
if (!mPath.empty()) { ReadCsv(); }
}
/**
* @brief Constructor
* @param pStream specifies an input stream to read CSV data from.
* @param pLabelParams specifies which row and column should be treated as labels.
* @param pSeparatorParams specifies which field and row separators should be used.
* @param pConverterParams specifies how invalid numbers (including empty strings) should
* be handled.
*/
explicit Document(std::istream& pStream,
const LabelParams& pLabelParams = LabelParams(),
const SeparatorParams& pSeparatorParams = SeparatorParams(),
const ConverterParams& pConverterParams = ConverterParams())
: mPath(),
mLabelParams(pLabelParams),
mSeparatorParams(pSeparatorParams),
mConverterParams(pConverterParams)
{
ReadCsv(pStream);
}
/**
* @brief Copy constructor
* @param pDocument specifies the Document instance to copy.
*/
explicit Document(const Document& pDocument)
: mPath(pDocument.mPath),
mLabelParams(pDocument.mLabelParams),
mSeparatorParams(pDocument.mSeparatorParams),
mConverterParams(pDocument.mConverterParams),
mData(pDocument.mData),
mColumnNames(pDocument.mColumnNames),
mRowNames(pDocument.mRowNames)
{
}
/**
* @brief Read Document data from file.
* @param pPath specifies the path of an existing CSV-file to populate the
* Document data with.
*/
void Load(const std::string& pPath)
{
mPath = pPath;
ReadCsv();
}
/**
* @brief Write Document data to file.
* @param pPath optionally specifies the path where the CSV-file will be created
* (if not specified, the original path provided when creating or
* loading the Document data will be used).
*/
void Save(const std::string& pPath = std::string())
{
if (!pPath.empty()) { mPath = pPath; }
WriteCsv();
}
/**
* @brief Write Document data to stream.
* @param pStream specifies an output stream to write the data to.
*/
void Save(std::ostream& pStream) { WriteCsv(pStream); }
/**
* @brief Get column by index.
* @param pColumnIdx zero-based column index.
* @returns vector of column data.
*/
template <typename T>
std::vector<T> GetColumn(const size_t pColumnIdx) const
{
const ssize_t columnIdx = pColumnIdx + (mLabelParams.mRowNameIdx + 1);
std::vector<T> column;
Converter<T> converter(mConverterParams);
for (auto itRow = mData.begin(); itRow != mData.end(); ++itRow) {
if (std::distance(mData.begin(), itRow) > mLabelParams.mColumnNameIdx) {
T val;
converter.ToVal(itRow->at(columnIdx), val);
column.push_back(val);
}
}
return column;
}
/**
* @brief Get column by index.
* @param pColumnIdx zero-based column index.
* @param pToVal conversion function.
* @returns vector of column data.
*/
template <typename T>
std::vector<T> GetColumn(const size_t pColumnIdx, ConvFunc<T> pToVal) const
{
const ssize_t columnIdx = pColumnIdx + (mLabelParams.mRowNameIdx + 1);
std::vector<T> column;
for (auto itRow = mData.begin(); itRow != mData.end(); ++itRow) {
if (std::distance(mData.begin(), itRow) > mLabelParams.mColumnNameIdx) {
T val;
pToVal(itRow->at(columnIdx), val);
column.push_back(val);
}
}
return column;
}
/**
* @brief Get column by name.
* @param pColumnName column label name.
* @returns vector of column data.
*/
template <typename T>
std::vector<T> GetColumn(const std::string& pColumnName) const
{
const ssize_t columnIdx = GetColumnIdx(pColumnName);
if (columnIdx < 0) { throw std::out_of_range("column not found: " + pColumnName); }
return GetColumn<T>(columnIdx);
}
/**
* @brief Get column by name.
* @param pColumnName column label name.
* @param pToVal conversion function.
* @returns vector of column data.
*/
template <typename T>
std::vector<T> GetColumn(const std::string& pColumnName, ConvFunc<T> pToVal) const
{
const ssize_t columnIdx = GetColumnIdx(pColumnName);
if (columnIdx < 0) { throw std::out_of_range("column not found: " + pColumnName); }
return GetColumn<T>(columnIdx, pToVal);
}
/**
* @brief Set column by index.
* @param pColumnIdx zero-based column index.
* @param pColumn vector of column data.
*/
template <typename T>
void SetColumn(const size_t pColumnIdx, const std::vector<T>& pColumn)
{
const size_t columnIdx = pColumnIdx + (mLabelParams.mRowNameIdx + 1);
while (pColumn.size() + (mLabelParams.mColumnNameIdx + 1) > GetDataRowCount()) {
std::vector<std::string> row;
row.resize(GetDataColumnCount());
mData.push_back(row);
}
if ((columnIdx + 1) > GetDataColumnCount()) {
for (auto itRow = mData.begin(); itRow != mData.end(); ++itRow) {
itRow->resize(columnIdx + 1 + (mLabelParams.mRowNameIdx + 1));
}
}
Converter<T> converter(mConverterParams);
for (auto itRow = pColumn.begin(); itRow != pColumn.end(); ++itRow) {
std::string str;
converter.ToStr(*itRow, str);
mData.at(std::distance(pColumn.begin(), itRow) + (mLabelParams.mColumnNameIdx + 1))
.at(columnIdx) = str;
}
}
/**
* @brief Set column by name.
* @param pColumnName column label name.
* @param pColumn vector of column data.
*/
template <typename T>
void SetColumn(const std::string& pColumnName, const std::vector<T>& pColumn)
{
const ssize_t columnIdx = GetColumnIdx(pColumnName);
if (columnIdx < 0) { throw std::out_of_range("column not found: " + pColumnName); }
SetColumn<T>(columnIdx, pColumn);
}
/**
* @brief Remove column by index.
* @param pColumnIdx zero-based column index.
*/
void RemoveColumn(const size_t pColumnIdx)
{
const ssize_t columnIdx = pColumnIdx + (mLabelParams.mRowNameIdx + 1);
for (auto itRow = mData.begin(); itRow != mData.end(); ++itRow) {
itRow->erase(itRow->begin() + columnIdx);
}
}
/**
* @brief Remove column by name.
* @param pColumnName column label name.
*/
void RemoveColumn(const std::string& pColumnName)
{
ssize_t columnIdx = GetColumnIdx(pColumnName);
if (columnIdx < 0) { throw std::out_of_range("column not found: " + pColumnName); }
RemoveColumn(columnIdx);
}
/**
* @brief Get number of data columns.
* @returns column count.
*/
size_t GetColumnCount() const
{
return (mData.size() > 0) ? (mData.at(0).size() - (mLabelParams.mRowNameIdx + 1)) : 0;
}
/**
* @brief Get row by index.
* @param pRowIdx zero-based row index.
* @returns vector of row data.
*/
template <typename T>
std::vector<T> GetRow(const size_t pRowIdx) const
{
const ssize_t rowIdx = pRowIdx + (mLabelParams.mColumnNameIdx + 1);
std::vector<T> row;
Converter<T> converter(mConverterParams);
for (auto itCol = mData.at(rowIdx).begin(); itCol != mData.at(rowIdx).end(); ++itCol) {
if (std::distance(mData.at(rowIdx).begin(), itCol) > mLabelParams.mRowNameIdx) {
T val;
converter.ToVal(*itCol, val);
row.push_back(val);
}
}
return row;
}
/**
* @brief Get row by index.
* @param pRowIdx zero-based row index.
* @param pToVal conversion function.
* @returns vector of row data.
*/
template <typename T>
std::vector<T> GetRow(const size_t pRowIdx, ConvFunc<T> pToVal) const
{
const ssize_t rowIdx = pRowIdx + (mLabelParams.mColumnNameIdx + 1);
std::vector<T> row;
Converter<T> converter(mConverterParams);
for (auto itCol = mData.at(rowIdx).begin(); itCol != mData.at(rowIdx).end(); ++itCol) {
if (std::distance(mData.at(rowIdx).begin(), itCol) > mLabelParams.mRowNameIdx) {
T val;
pToVal(*itCol, val);
row.push_back(val);
}
}
return row;
}
/**
* @brief Get row by name.
* @param pRowName row label name.
* @returns vector of row data.
*/
template <typename T>
std::vector<T> GetRow(const std::string& pRowName) const
{
ssize_t rowIdx = GetRowIdx(pRowName);
if (rowIdx < 0) { throw std::out_of_range("row not found: " + pRowName); }
return GetRow<T>(rowIdx);
}
/**
* @brief Get row by name.
* @param pRowName row label name.
* @param pToVal conversion function.
* @returns vector of row data.
*/
template <typename T>
std::vector<T> GetRow(const std::string& pRowName, ConvFunc<T> pToVal) const
{
ssize_t rowIdx = GetRowIdx(pRowName);
if (rowIdx < 0) { throw std::out_of_range("row not found: " + pRowName); }
return GetRow<T>(rowIdx, pToVal);
}
/**
* @brief Set row by index.
* @param pRowIdx zero-based row index.
* @param pRow vector of row data.
*/
template <typename T>
void SetRow(const size_t pRowIdx, const std::vector<T>& pRow)
{
const size_t rowIdx = pRowIdx + (mLabelParams.mColumnNameIdx + 1);
while ((rowIdx + 1) > GetDataRowCount()) {
std::vector<std::string> row;
row.resize(GetDataColumnCount());
mData.push_back(row);
}
if (pRow.size() > GetDataColumnCount()) {
for (auto itRow = mData.begin(); itRow != mData.end(); ++itRow) {
itRow->resize(pRow.size() + (mLabelParams.mRowNameIdx + 1));
}
}
Converter<T> converter(mConverterParams);
for (auto itCol = pRow.begin(); itCol != pRow.end(); ++itCol) {
std::string str;
converter.ToStr(*itCol, str);
mData.at(rowIdx).at(std::distance(pRow.begin(), itCol) + (mLabelParams.mRowNameIdx + 1)) =
str;
}
}
/**
* @brief Set row by name.
* @param pRowName row label name.
* @param pRow vector of row data.
*/
template <typename T>
void SetRow(const std::string& pRowName, const std::vector<T>& pRow)
{
ssize_t rowIdx = GetRowIdx(pRowName);
if (rowIdx < 0) { throw std::out_of_range("row not found: " + pRowName); }
return SetRow<T>(rowIdx, pRow);
}
/**
* @brief Remove row by index.
* @param pRowIdx zero-based row index.
*/
void RemoveRow(const size_t pRowIdx)
{
const ssize_t rowIdx = pRowIdx + (mLabelParams.mColumnNameIdx + 1);
mData.erase(mData.begin() + rowIdx);
}
/**
* @brief Remove row by name.
* @param pRowName row label name.
*/
void RemoveRow(const std::string& pRowName)
{
ssize_t rowIdx = GetRowIdx(pRowName);
if (rowIdx < 0) { throw std::out_of_range("row not found: " + pRowName); }
RemoveRow(rowIdx);
}
/**
* @brief Get number of data rows.
* @returns row count.
*/
size_t GetRowCount() const { return mData.size() - (mLabelParams.mColumnNameIdx + 1); }
/**
* @brief Get cell by index.
* @param pColumnIdx zero-based column index.
* @param pRowIdx zero-based row index.
* @returns cell data.
*/
template <typename T>
T GetCell(const size_t pColumnIdx, const size_t pRowIdx) const
{
const ssize_t columnIdx = pColumnIdx + (mLabelParams.mRowNameIdx + 1);
const ssize_t rowIdx = pRowIdx + (mLabelParams.mColumnNameIdx + 1);
T val;
Converter<T> converter(mConverterParams);
converter.ToVal(mData.at(rowIdx).at(columnIdx), val);
return val;
}
/**
* @brief Get cell by index.
* @param pColumnIdx zero-based column index.
* @param pRowIdx zero-based row index.
* @param pToVal conversion function.
* @returns cell data.
*/
template <typename T>
T GetCell(const size_t pColumnIdx, const size_t pRowIdx, ConvFunc<T> pToVal) const
{
const ssize_t columnIdx = pColumnIdx + (mLabelParams.mRowNameIdx + 1);
const ssize_t rowIdx = pRowIdx + (mLabelParams.mColumnNameIdx + 1);
T val;
pToVal(mData.at(rowIdx).at(columnIdx), val);
return val;
}
/**
* @brief Get cell by name.
* @param pColumnName column label name.
* @param pRowName row label name.
* @returns cell data.
*/
template <typename T>
T GetCell(const std::string& pColumnName, const std::string& pRowName) const
{
const ssize_t columnIdx = GetColumnIdx(pColumnName);
if (columnIdx < 0) { throw std::out_of_range("column not found: " + pColumnName); }
const ssize_t rowIdx = GetRowIdx(pRowName);
if (rowIdx < 0) { throw std::out_of_range("row not found: " + pRowName); }
return GetCell<T>(columnIdx, rowIdx);
}
/**
* @brief Get cell by name.
* @param pColumnName column label name.
* @param pRowName row label name.
* @param pToVal conversion function.
* @returns cell data.
*/
template <typename T>
T GetCell(const std::string& pColumnName, const std::string& pRowName, ConvFunc<T> pToVal) const
{
const ssize_t columnIdx = GetColumnIdx(pColumnName);
if (columnIdx < 0) { throw std::out_of_range("column not found: " + pColumnName); }
const ssize_t rowIdx = GetRowIdx(pRowName);
if (rowIdx < 0) { throw std::out_of_range("row not found: " + pRowName); }
return GetCell<T>(columnIdx, rowIdx, pToVal);
}
/**
* @brief Get cell by column name and row index.
* @param pColumnName column label name.
* @param pRowIdx zero-based row index.
* @returns cell data.
*/
template <typename T>
T GetCell(const std::string& pColumnName, const size_t pRowIdx) const
{
const ssize_t columnIdx = GetColumnIdx(pColumnName);
if (columnIdx < 0) { throw std::out_of_range("column not found: " + pColumnName); }
return GetCell<T>(columnIdx, pRowIdx);
}
/**
* @brief Get cell by column name and row index.
* @param pColumnName column label name.
* @param pRowIdx zero-based row index.
* @param pToVal conversion function.
* @returns cell data.
*/
template <typename T>
T GetCell(const std::string& pColumnName, const size_t pRowIdx, ConvFunc<T> pToVal) const
{
const ssize_t columnIdx = GetColumnIdx(pColumnName);
if (columnIdx < 0) { throw std::out_of_range("column not found: " + pColumnName); }
return GetCell<T>(columnIdx, pRowIdx, pToVal);
}
/**
* @brief Get cell by column index and row name.
* @param pColumnIdx zero-based column index.
* @param pRowName row label name.
* @returns cell data.
*/
template <typename T>
T GetCell(const size_t pColumnIdx, const std::string& pRowName) const
{
const ssize_t rowIdx = GetRowIdx(pRowName);
if (rowIdx < 0) { throw std::out_of_range("row not found: " + pRowName); }
return GetCell<T>(pColumnIdx, rowIdx);
}
/**
* @brief Get cell by column index and row name.
* @param pColumnIdx zero-based column index.
* @param pRowName row label name.
* @param pToVal conversion function.
* @returns cell data.
*/
template <typename T>
T GetCell(const size_t pColumnIdx, const std::string& pRowName, ConvFunc<T> pToVal) const
{
const ssize_t rowIdx = GetRowIdx(pRowName);
if (rowIdx < 0) { throw std::out_of_range("row not found: " + pRowName); }
return GetCell<T>(pColumnIdx, rowIdx, pToVal);
}
/**
* @brief Set cell by index.
* @param pRowIdx zero-based row index.
* @param pColumnIdx zero-based column index.
* @param pCell cell data.
*/
template <typename T>
void SetCell(const size_t pColumnIdx, const size_t pRowIdx, const T& pCell)
{
const size_t columnIdx = pColumnIdx + (mLabelParams.mRowNameIdx + 1);
const size_t rowIdx = pRowIdx + (mLabelParams.mColumnNameIdx + 1);
while ((rowIdx + 1) > GetDataRowCount()) {
std::vector<std::string> row;
row.resize(GetDataColumnCount());
mData.push_back(row);
}
if ((columnIdx + 1) > GetDataColumnCount()) {
for (auto itRow = mData.begin(); itRow != mData.end(); ++itRow) {
itRow->resize(columnIdx + 1);
}
}
std::string str;
Converter<T> converter(mConverterParams);
converter.ToStr(pCell, str);
mData.at(rowIdx).at(columnIdx) = str;
}
/**
* @brief Set cell by name.
* @param pColumnName column label name.
* @param pRowName row label name.
* @param pCell cell data.
*/
template <typename T>
void SetCell(const std::string& pColumnName, const std::string& pRowName, const T& pCell)
{
const ssize_t columnIdx = GetColumnIdx(pColumnName);
if (columnIdx < 0) { throw std::out_of_range("column not found: " + pColumnName); }
const ssize_t rowIdx = GetRowIdx(pRowName);
if (rowIdx < 0) { throw std::out_of_range("row not found: " + pRowName); }
SetCell<T>(columnIdx, rowIdx, pCell);
}
/**
* @brief Get column name
* @param pColumnIdx zero-based column index.
* @returns column name.
*/
std::string GetColumnName(const ssize_t pColumnIdx)
{
const ssize_t columnIdx = pColumnIdx + (mLabelParams.mRowNameIdx + 1);
if (mLabelParams.mColumnNameIdx < 0) {
throw std::out_of_range("column name row index < 0: " +
std::to_string(mLabelParams.mColumnNameIdx));
}
return mData.at(mLabelParams.mColumnNameIdx).at(columnIdx);
}
/**
* @brief Set column name
* @param pColumnIdx zero-based column index.
* @param pColumnName column name.
*/
void SetColumnName(size_t pColumnIdx, const std::string& pColumnName)
{
const ssize_t columnIdx = pColumnIdx + (mLabelParams.mRowNameIdx + 1);
mColumnNames[pColumnName] = columnIdx;
if (mLabelParams.mColumnNameIdx < 0) {
throw std::out_of_range("column name row index < 0: " +
std::to_string(mLabelParams.mColumnNameIdx));
}
mData.at(mLabelParams.mColumnNameIdx).at(columnIdx) = pColumnName;
}
/**
* @brief Get column names
* @returns vector of column names.
*/
std::vector<std::string> GetColumnNames()
{
if (mLabelParams.mColumnNameIdx >= 0) {
return std::vector<std::string>(
mData.at(mLabelParams.mColumnNameIdx).begin() + (mLabelParams.mRowNameIdx + 1),
mData.at(mLabelParams.mColumnNameIdx).end());
}
return std::vector<std::string>();
}
/**
* @brief Get row name
* @param pRowIdx zero-based column index.
* @returns row name.
*/
std::string GetRowName(const ssize_t pRowIdx)
{
const ssize_t rowIdx = pRowIdx + (mLabelParams.mColumnNameIdx + 1);
if (mLabelParams.mRowNameIdx < 0) {
throw std::out_of_range("row name column index < 0: " +
std::to_string(mLabelParams.mRowNameIdx));
}
return mData.at(rowIdx).at(mLabelParams.mRowNameIdx);
}
/**
* @brief Set row name
* @param pRowIdx zero-based row index.
* @param pRowName row name.
*/
void SetRowName(size_t pRowIdx, const std::string& pRowName)
{
const ssize_t rowIdx = pRowIdx + (mLabelParams.mColumnNameIdx + 1);
mRowNames[pRowName] = rowIdx;
if (mLabelParams.mRowNameIdx < 0) {
throw std::out_of_range("row name column index < 0: " +
std::to_string(mLabelParams.mRowNameIdx));
}
mData.at(rowIdx).at(mLabelParams.mRowNameIdx) = pRowName;
}
/**
* @brief Get row names
* @returns vector of row names.
*/
std::vector<std::string> GetRowNames()
{
std::vector<std::string> rownames;
if (mLabelParams.mRowNameIdx >= 0) {
for (auto itRow = mData.begin(); itRow != mData.end(); ++itRow) {
if (std::distance(mData.begin(), itRow) > mLabelParams.mColumnNameIdx) {
rownames.push_back(itRow->at(mLabelParams.mRowNameIdx));
}
}
}
return rownames;
}
private:
void ReadCsv()
{
std::ifstream stream;
stream.exceptions(std::ifstream::failbit | std::ifstream::badbit);
stream.open(mPath, std::ios::binary);
#ifdef HAS_CODECVT
stream.seekg(0, std::ios::end);
std::streamsize length = stream.tellg();
stream.seekg(0, std::ios::beg);
std::vector<char> bom(2, '\0');
if (length >= 2) { stream.read(bom.data(), 2); }
static const std::vector<char> bomU16le = {'\xff', '\xfe'};
static const std::vector<char> bomU16be = {'\xfe', '\xff'};
if ((bom == bomU16le) || (bom == bomU16be)) {
mIsUtf16 = true;
mIsLE = (bom == bomU16le);
std::wifstream wstream;
wstream.exceptions(std::wifstream::failbit | std::wifstream::badbit);
wstream.open(mPath, std::ios::binary);
if (mIsLE) {
wstream.imbue(
std::locale(wstream.getloc(),
new std::codecvt_utf16<wchar_t,
0x10ffff,
static_cast<std::codecvt_mode>(std::consume_header |
std::little_endian)>));
} else {
wstream.imbue(std::locale(wstream.getloc(),
new std::codecvt_utf16<wchar_t, 0x10ffff, std::consume_header>));
}
std::wstringstream wss;
wss << wstream.rdbuf();
std::string utf8 = ToString(wss.str());
std::stringstream ss(utf8);
ReadCsv(ss);
} else
#endif
{
stream.seekg(0, std::ios::beg);
ReadCsv(stream);
}
}
void ReadCsv(std::istream& pStream)
{
pStream.seekg(0, std::ios::end);
std::streamsize fileLength = pStream.tellg();
pStream.seekg(0, std::ios::beg);
const std::streamsize bufLength = 64 * 1024;
std::vector<char> buffer(bufLength);
std::vector<std::string> row;
std::string cell;
bool quoted = false;
int cr = 0;
int lf = 0;
while (fileLength > 0) {
std::streamsize readLength = std::min(fileLength, bufLength);
pStream.read(buffer.data(), readLength);
for (int i = 0; i < readLength; ++i) {
if (buffer[i] == '"') {
if (cell.empty() || cell[0] == '"') { quoted = !quoted; }
cell += buffer[i];
} else if (buffer[i] == mSeparatorParams.mSeparator) {
if (!quoted) {
row.push_back(mSeparatorParams.mTrim ? Trim(cell) : cell);
cell.clear();
} else {
cell += buffer[i];
}
} else if (buffer[i] == '\r') {
if (mSeparatorParams.mQuotedLinebreaks && quoted) {
cell += buffer[i];
} else {
++cr;
}
} else if (buffer[i] == '\n') {
if (mSeparatorParams.mQuotedLinebreaks && quoted) {
cell += buffer[i];
} else {
++lf;
row.push_back(mSeparatorParams.mTrim ? Trim(cell) : cell);
cell.clear();
mData.push_back(row);
row.clear();
quoted = false;
}
} else {
cell += buffer[i];
}
}
fileLength -= readLength;
}
// Handle last line without linebreak
if (!cell.empty() || !row.empty()) {
row.push_back(mSeparatorParams.mTrim ? Trim(cell) : cell);
cell.clear();
mData.push_back(row);
row.clear();
}
// Assume CR/LF if at least half the linebreaks have CR
mSeparatorParams.mHasCR = (cr > (lf / 2));
// Set up column labels
if ((mLabelParams.mColumnNameIdx >= 0) && (mData.size() > 0)) {
int i = 0;
for (auto& columnName : mData[mLabelParams.mColumnNameIdx]) {
mColumnNames[columnName] = i++;
}
}
// Set up row labels
if ((mLabelParams.mRowNameIdx >= 0) &&
(static_cast<ssize_t>(mData.size()) > (mLabelParams.mColumnNameIdx + 1))) {
int i = 0;
for (auto& dataRow : mData) {
mRowNames[dataRow[mLabelParams.mRowNameIdx]] = i++;
}
}
}
void WriteCsv() const
{
#ifdef HAS_CODECVT
if (mIsUtf16) {
std::stringstream ss;
WriteCsv(ss);
std::string utf8 = ss.str();
std::wstring wstr = ToWString(utf8);
std::wofstream wstream;
wstream.exceptions(std::wofstream::failbit | std::wofstream::badbit);
wstream.open(mPath, std::ios::binary | std::ios::trunc);
if (mIsLE) {
wstream.imbue(std::locale(
wstream.getloc(),
new std::
codecvt_utf16<wchar_t, 0x10ffff, static_cast<std::codecvt_mode>(std::little_endian)>));
} else {
wstream.imbue(std::locale(wstream.getloc(), new std::codecvt_utf16<wchar_t, 0x10ffff>));
}
wstream << (wchar_t)0xfeff;
wstream << wstr;
} else
#endif
{
std::ofstream stream;
stream.exceptions(std::ofstream::failbit | std::ofstream::badbit);
stream.open(mPath, std::ios::binary | std::ios::trunc);
WriteCsv(stream);
}
}
void WriteCsv(std::ostream& pStream) const
{
for (auto itr = mData.begin(); itr != mData.end(); ++itr) {
for (auto itc = itr->begin(); itc != itr->end(); ++itc) {
if ((std::string::npos == itc->find(mSeparatorParams.mSeparator)) ||
((itc->length() >= 2) && ((*itc)[0] == '\"') && ((*itc)[itc->length() - 1] == '\"'))) {
pStream << *itc;
} else {
pStream << '"' << *itc << '"';
}
if (std::distance(itc, itr->end()) > 1) { pStream << mSeparatorParams.mSeparator; }
}
pStream << (mSeparatorParams.mHasCR ? "\r\n" : "\n");
}
}
ssize_t GetColumnIdx(const std::string& pColumnName) const
{
if (mLabelParams.mColumnNameIdx >= 0) {
if (mColumnNames.find(pColumnName) != mColumnNames.end()) {
return mColumnNames.at(pColumnName) - (mLabelParams.mRowNameIdx + 1);
}
}
return -1;
}
ssize_t GetRowIdx(const std::string& pRowName) const
{
if (mLabelParams.mRowNameIdx >= 0) {
if (mRowNames.find(pRowName) != mRowNames.end()) {
return mRowNames.at(pRowName) - (mLabelParams.mColumnNameIdx + 1);
}
}
return -1;
}
size_t GetDataRowCount() const { return mData.size(); }
size_t GetDataColumnCount() const { return (mData.size() > 0) ? mData.at(0).size() : 0; }
#ifdef HAS_CODECVT
#if defined(_MSC_VER)
#pragma warning(disable : 4996)
#endif
static std::string ToString(const std::wstring& pWStr)
{
size_t len = std::wcstombs(nullptr, pWStr.c_str(), 0) + 1;
char* cstr = new char[len];
std::wcstombs(cstr, pWStr.c_str(), len);
std::string str(cstr);
delete[] cstr;
return str;
}
static std::wstring ToWString(const std::string& pStr)
{
size_t len = 1 + mbstowcs(nullptr, pStr.c_str(), 0);
wchar_t* wcstr = new wchar_t[len];
std::mbstowcs(wcstr, pStr.c_str(), len);
std::wstring wstr(wcstr);
delete[] wcstr;
return wstr;
}
#if defined(_MSC_VER)
#pragma warning(default : 4996)
#endif
#endif
static std::string Trim(const std::string& pStr)
{
std::string str = pStr;
// ltrim
str.erase(str.begin(),
std::find_if(str.begin(), str.end(), [](int ch) { return !isspace(ch); }));
// rtrim
str.erase(std::find_if(str.rbegin(), str.rend(), [](int ch) { return !isspace(ch); }).base(),
str.end());
return str;
}
private:
std::string mPath;
LabelParams mLabelParams;
SeparatorParams mSeparatorParams;
ConverterParams mConverterParams;
std::vector<std::vector<std::string>> mData;
std::map<std::string, size_t> mColumnNames;
std::map<std::string, size_t> mRowNames;
#ifdef HAS_CODECVT
bool mIsUtf16 = false;
bool mIsLE = false;
#endif
};
} // namespace rapidcsv
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/.devcontainer/README.md
|
# RMM Development Containers
This directory contains [devcontainer configurations](https://containers.dev/implementors/json_reference/) for using VSCode to [develop in a container](https://code.visualstudio.com/docs/devcontainers/containers) via the `Remote Containers` [extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) or [GitHub Codespaces](https://github.com/codespaces).
This container is a turnkey development environment for building and testing the RMM C++ and Python libraries.
## Table of Contents
* [Prerequisites](#prerequisites)
* [Host bind mounts](#host-bind-mounts)
* [Launch a Dev Container](#launch-a-dev-container)
## Prerequisites
* [VSCode](https://code.visualstudio.com/download)
* [VSCode Remote Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
## Host bind mounts
By default, the following directories are bind-mounted into the devcontainer:
* `${repo}:/home/coder/rmm`
* `${repo}/../.aws:/home/coder/.aws`
* `${repo}/../.local:/home/coder/.local`
* `${repo}/../.cache:/home/coder/.cache`
* `${repo}/../.conda:/home/coder/.conda`
* `${repo}/../.config:/home/coder/.config`
This ensures caches, configurations, dependencies, and your commits are persisted on the host across container runs.
## Launch a Dev Container
To launch a devcontainer from VSCode, open the RMM repo and select the "Reopen in Container" button in the bottom right:<br/><img src="https://user-images.githubusercontent.com/178183/221771999-97ab29d5-e718-4e5f-b32f-2cdd51bba25c.png"/>
Alternatively, open the VSCode command palette (typically `cmd/ctrl + shift + P`) and run the "Rebuild and Reopen in Container" command.
| 0 |
rapidsai_public_repos/rmm
|
rapidsai_public_repos/rmm/.devcontainer/Dockerfile
|
# syntax=docker/dockerfile:1.5
ARG BASE
ARG PYTHON_PACKAGE_MANAGER=conda
FROM ${BASE} as pip-base
ENV DEFAULT_VIRTUAL_ENV=rapids
FROM ${BASE} as conda-base
ENV DEFAULT_CONDA_ENV=rapids
FROM ${PYTHON_PACKAGE_MANAGER}-base
ARG CUDA
ENV CUDAARCHS="RAPIDS"
ENV CUDA_VERSION="${CUDA_VERSION:-${CUDA}}"
ARG PYTHON_PACKAGE_MANAGER
ENV PYTHON_PACKAGE_MANAGER="${PYTHON_PACKAGE_MANAGER}"
ENV PYTHONSAFEPATH="1"
ENV PYTHONUNBUFFERED="1"
ENV PYTHONDONTWRITEBYTECODE="1"
ENV SCCACHE_REGION="us-east-2"
ENV SCCACHE_BUCKET="rapids-sccache-devs"
ENV VAULT_HOST="https://vault.ops.k8s.rapids.ai"
ENV HISTFILE="/home/coder/.cache/._bash_history"
| 0 |
rapidsai_public_repos/rmm/.devcontainer
|
rapidsai_public_repos/rmm/.devcontainer/cuda11.8-pip/devcontainer.json
|
{
"build": {
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "11.8",
"PYTHON_PACKAGE_MANAGER": "pip",
"BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda11.8-ubuntu22.04"
}
},
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda11.8-venvs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/rmm,type=bind,consistency=consistent",
"mounts": [
"source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda11.8-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
"extensions": [
"ms-python.flake8",
"nvidia.nsight-vscode-edition"
]
}
}
}
| 0 |
rapidsai_public_repos/rmm/.devcontainer
|
rapidsai_public_repos/rmm/.devcontainer/cuda12.0-pip/devcontainer.json
|
{
"build": {
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "12.0",
"PYTHON_PACKAGE_MANAGER": "pip",
"BASE": "rapidsai/devcontainers:24.02-cpp-llvm16-cuda12.0-ubuntu22.04"
}
},
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config/pip,local/share/${localWorkspaceFolderBasename}-cuda12.0-venvs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/rmm,type=bind,consistency=consistent",
"mounts": [
"source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.local/share/${localWorkspaceFolderBasename}-cuda12.0-venvs,target=/home/coder/.local/share/venvs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
"extensions": [
"ms-python.flake8",
"nvidia.nsight-vscode-edition"
]
}
}
}
| 0 |
rapidsai_public_repos/rmm/.devcontainer
|
rapidsai_public_repos/rmm/.devcontainer/cuda12.0-conda/devcontainer.json
|
{
"build": {
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "12.0",
"PYTHON_PACKAGE_MANAGER": "conda",
"BASE": "rapidsai/devcontainers:24.02-cpp-mambaforge-ubuntu22.04"
}
},
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda12.0-envs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/rmm,type=bind,consistency=consistent",
"mounts": [
"source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda12.0-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
"extensions": [
"ms-python.flake8",
"nvidia.nsight-vscode-edition"
]
}
}
}
| 0 |
rapidsai_public_repos/rmm/.devcontainer
|
rapidsai_public_repos/rmm/.devcontainer/cuda11.8-conda/devcontainer.json
|
{
"build": {
"context": "${localWorkspaceFolder}/.devcontainer",
"dockerfile": "${localWorkspaceFolder}/.devcontainer/Dockerfile",
"args": {
"CUDA": "11.8",
"PYTHON_PACKAGE_MANAGER": "conda",
"BASE": "rapidsai/devcontainers:24.02-cpp-cuda11.8-mambaforge-ubuntu22.04"
}
},
"hostRequirements": {"gpu": "optional"},
"features": {
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils:24.2": {}
},
"overrideFeatureInstallOrder": [
"ghcr.io/rapidsai/devcontainers/features/rapids-build-utils"
],
"initializeCommand": ["/bin/bash", "-c", "mkdir -m 0755 -p ${localWorkspaceFolder}/../.{aws,cache,config,conda/pkgs,conda/${localWorkspaceFolderBasename}-cuda11.8-envs}"],
"postAttachCommand": ["/bin/bash", "-c", "if [ ${CODESPACES:-false} = 'true' ]; then . devcontainer-utils-post-attach-command; . rapids-post-attach-command; fi"],
"workspaceFolder": "/home/coder",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/coder/rmm,type=bind,consistency=consistent",
"mounts": [
"source=${localWorkspaceFolder}/../.aws,target=/home/coder/.aws,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.cache,target=/home/coder/.cache,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.config,target=/home/coder/.config,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/pkgs,target=/home/coder/.conda/pkgs,type=bind,consistency=consistent",
"source=${localWorkspaceFolder}/../.conda/${localWorkspaceFolderBasename}-cuda11.8-envs,target=/home/coder/.conda/envs,type=bind,consistency=consistent"
],
"customizations": {
"vscode": {
"extensions": [
"ms-python.flake8",
"nvidia.nsight-vscode-edition"
]
}
}
}
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.